Bluetooth: Update L2CAP version information
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blobcdd608d72741ae3f6d00ff2e8553890636a5f4a1
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth L2CAP core and sockets. */
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/capability.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/list.h>
43 #include <linux/device.h>
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #include <linux/uaccess.h>
47 #include <linux/crc16.h>
48 #include <net/sock.h>
50 #include <asm/system.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55 #include <net/bluetooth/l2cap.h>
57 #define VERSION "2.15"
59 static int enable_ertm = 0;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct workqueue_struct *_busy_wq;
68 static struct bt_sock_list l2cap_sk_list = {
69 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
72 static void l2cap_busy_work(struct work_struct *work);
74 static void __l2cap_sock_close(struct sock *sk, int reason);
75 static void l2cap_sock_close(struct sock *sk);
76 static void l2cap_sock_kill(struct sock *sk);
78 static int l2cap_build_conf_req(struct sock *sk, void *data);
79 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
80 u8 code, u8 ident, u16 dlen, void *data);
82 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP timers ---- */

/* sk_timer expiry handler: pick an errno matching the phase the
 * connection timed out in, then tear the socket down. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* kill must run on the unlocked socket; drop the timer's ref */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
110 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
112 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
113 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
116 static void l2cap_sock_clear_timer(struct sock *sk)
118 BT_DBG("sock %p state %d", sk, sk->sk_state);
119 sk_stop_timer(sk, &sk->sk_timer);
122 /* ---- L2CAP channels ---- */
123 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->dcid == cid)
128 break;
130 return s;
133 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 struct sock *s;
136 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
137 if (l2cap_pi(s)->scid == cid)
138 break;
140 return s;
143 /* Find channel with given SCID.
144 * Returns locked socket */
145 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 struct sock *s;
148 read_lock(&l->lock);
149 s = __l2cap_get_chan_by_scid(l, cid);
150 if (s)
151 bh_lock_sock(s);
152 read_unlock(&l->lock);
153 return s;
156 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 struct sock *s;
159 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
160 if (l2cap_pi(s)->ident == ident)
161 break;
163 return s;
166 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 struct sock *s;
169 read_lock(&l->lock);
170 s = __l2cap_get_chan_by_ident(l, ident);
171 if (s)
172 bh_lock_sock(s);
173 read_unlock(&l->lock);
174 return s;
177 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
179 u16 cid = L2CAP_CID_DYN_START;
181 for (; cid < L2CAP_CID_DYN_END; cid++) {
182 if (!__l2cap_get_chan_by_scid(l, cid))
183 return cid;
186 return 0;
189 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
191 sock_hold(sk);
193 if (l->head)
194 l2cap_pi(l->head)->prev_c = sk;
196 l2cap_pi(sk)->next_c = l->head;
197 l2cap_pi(sk)->prev_c = NULL;
198 l->head = sk;
201 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
203 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
205 write_lock_bh(&l->lock);
206 if (sk == l->head)
207 l->head = next;
209 if (next)
210 l2cap_pi(next)->prev_c = prev;
211 if (prev)
212 l2cap_pi(prev)->next_c = next;
213 write_unlock_bh(&l->lock);
215 __sock_put(sk);
218 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
220 struct l2cap_chan_list *l = &conn->chan_list;
222 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
223 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
225 conn->disc_reason = 0x13;
227 l2cap_pi(sk)->conn = conn;
229 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
230 /* Alloc CID for connection-oriented socket */
231 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
232 } else if (sk->sk_type == SOCK_DGRAM) {
233 /* Connectionless socket */
234 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
235 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 } else {
238 /* Raw socket can send/recv signalling messages only */
239 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
240 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
244 __l2cap_chan_link(l, sk);
246 if (parent)
247 bt_accept_enqueue(parent, sk);
250 /* Delete channel.
251 * Must be called on the locked socket. */
252 static void l2cap_chan_del(struct sock *sk, int err)
254 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
255 struct sock *parent = bt_sk(sk)->parent;
257 l2cap_sock_clear_timer(sk);
259 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
261 if (conn) {
262 /* Unlink from channel list */
263 l2cap_chan_unlink(&conn->chan_list, sk);
264 l2cap_pi(sk)->conn = NULL;
265 hci_conn_put(conn->hcon);
268 sk->sk_state = BT_CLOSED;
269 sock_set_flag(sk, SOCK_ZAPPED);
271 if (err)
272 sk->sk_err = err;
274 if (parent) {
275 bt_accept_unlink(sk);
276 parent->sk_data_ready(parent, 0);
277 } else
278 sk->sk_state_change(sk);
280 skb_queue_purge(TX_QUEUE(sk));
282 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
283 struct srej_list *l, *tmp;
285 del_timer(&l2cap_pi(sk)->retrans_timer);
286 del_timer(&l2cap_pi(sk)->monitor_timer);
287 del_timer(&l2cap_pi(sk)->ack_timer);
289 skb_queue_purge(SREJ_QUEUE(sk));
290 skb_queue_purge(BUSY_QUEUE(sk));
292 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
293 list_del(&l->list);
294 kfree(l);
299 /* Service level security */
300 static inline int l2cap_check_security(struct sock *sk)
302 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
303 __u8 auth_type;
305 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
306 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
307 auth_type = HCI_AT_NO_BONDING_MITM;
308 else
309 auth_type = HCI_AT_NO_BONDING;
311 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
312 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
313 } else {
314 switch (l2cap_pi(sk)->sec_level) {
315 case BT_SECURITY_HIGH:
316 auth_type = HCI_AT_GENERAL_BONDING_MITM;
317 break;
318 case BT_SECURITY_MEDIUM:
319 auth_type = HCI_AT_GENERAL_BONDING;
320 break;
321 default:
322 auth_type = HCI_AT_NO_BONDING;
323 break;
327 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
328 auth_type);
331 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
333 u8 id;
335 /* Get next available identificator.
336 * 1 - 128 are used by kernel.
337 * 129 - 199 are reserved.
338 * 200 - 254 are used by utilities like l2ping, etc.
341 spin_lock_bh(&conn->lock);
343 if (++conn->tx_ident > 128)
344 conn->tx_ident = 1;
346 id = conn->tx_ident;
348 spin_unlock_bh(&conn->lock);
350 return id;
353 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
355 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
357 BT_DBG("code 0x%2.2x", code);
359 if (!skb)
360 return;
362 hci_send_acl(conn->hcon, skb, 0);
365 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
367 struct sk_buff *skb;
368 struct l2cap_hdr *lh;
369 struct l2cap_conn *conn = pi->conn;
370 struct sock *sk = (struct sock *)pi;
371 int count, hlen = L2CAP_HDR_SIZE + 2;
373 if (sk->sk_state != BT_CONNECTED)
374 return;
376 if (pi->fcs == L2CAP_FCS_CRC16)
377 hlen += 2;
379 BT_DBG("pi %p, control 0x%2.2x", pi, control);
381 count = min_t(unsigned int, conn->mtu, hlen);
382 control |= L2CAP_CTRL_FRAME_TYPE;
384 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
385 control |= L2CAP_CTRL_FINAL;
386 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
389 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
390 control |= L2CAP_CTRL_POLL;
391 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
394 skb = bt_skb_alloc(count, GFP_ATOMIC);
395 if (!skb)
396 return;
398 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
399 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
400 lh->cid = cpu_to_le16(pi->dcid);
401 put_unaligned_le16(control, skb_put(skb, 2));
403 if (pi->fcs == L2CAP_FCS_CRC16) {
404 u16 fcs = crc16(0, (u8 *)lh, count - 2);
405 put_unaligned_le16(fcs, skb_put(skb, 2));
408 hci_send_acl(pi->conn->hcon, skb, 0);
411 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
413 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
414 control |= L2CAP_SUPER_RCV_NOT_READY;
415 pi->conn_state |= L2CAP_CONN_RNR_SENT;
416 } else
417 control |= L2CAP_SUPER_RCV_READY;
419 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
421 l2cap_send_sframe(pi, control);
424 static inline int __l2cap_no_conn_pending(struct sock *sk)
426 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
429 static void l2cap_do_start(struct sock *sk)
431 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
433 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
434 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
435 return;
437 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
438 struct l2cap_conn_req req;
439 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
440 req.psm = l2cap_pi(sk)->psm;
442 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
443 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
445 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
446 L2CAP_CONN_REQ, sizeof(req), &req);
448 } else {
449 struct l2cap_info_req req;
450 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
452 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
453 conn->info_ident = l2cap_get_ident(conn);
455 mod_timer(&conn->info_timer, jiffies +
456 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
458 l2cap_send_cmd(conn, conn->info_ident,
459 L2CAP_INFO_REQ, sizeof(req), &req);
463 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465 u32 local_feat_mask = l2cap_feat_mask;
466 if (enable_ertm)
467 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469 switch (mode) {
470 case L2CAP_MODE_ERTM:
471 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
472 case L2CAP_MODE_STREAMING:
473 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
474 default:
475 return 0x00;
479 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
481 struct l2cap_disconn_req req;
483 if (!conn)
484 return;
486 skb_queue_purge(TX_QUEUE(sk));
488 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
489 del_timer(&l2cap_pi(sk)->retrans_timer);
490 del_timer(&l2cap_pi(sk)->monitor_timer);
491 del_timer(&l2cap_pi(sk)->ack_timer);
494 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
495 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
496 l2cap_send_cmd(conn, l2cap_get_ident(conn),
497 L2CAP_DISCONN_REQ, sizeof(req), &req);
499 sk->sk_state = BT_DISCONN;
500 sk->sk_err = err;
503 /* ---- L2CAP connections ---- */
504 static void l2cap_conn_start(struct l2cap_conn *conn)
506 struct l2cap_chan_list *l = &conn->chan_list;
507 struct sock_del_list del, *tmp1, *tmp2;
508 struct sock *sk;
510 BT_DBG("conn %p", conn);
512 INIT_LIST_HEAD(&del.list);
514 read_lock(&l->lock);
516 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
517 bh_lock_sock(sk);
519 if (sk->sk_type != SOCK_SEQPACKET &&
520 sk->sk_type != SOCK_STREAM) {
521 bh_unlock_sock(sk);
522 continue;
525 if (sk->sk_state == BT_CONNECT) {
526 struct l2cap_conn_req req;
528 if (!l2cap_check_security(sk) ||
529 !__l2cap_no_conn_pending(sk)) {
530 bh_unlock_sock(sk);
531 continue;
534 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
535 conn->feat_mask)
536 && l2cap_pi(sk)->conf_state &
537 L2CAP_CONF_STATE2_DEVICE) {
538 tmp1 = kzalloc(sizeof(struct sock_del_list),
539 GFP_ATOMIC);
540 tmp1->sk = sk;
541 list_add_tail(&tmp1->list, &del.list);
542 bh_unlock_sock(sk);
543 continue;
546 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
547 req.psm = l2cap_pi(sk)->psm;
549 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
550 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
553 L2CAP_CONN_REQ, sizeof(req), &req);
555 } else if (sk->sk_state == BT_CONNECT2) {
556 struct l2cap_conn_rsp rsp;
557 char buf[128];
558 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
559 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561 if (l2cap_check_security(sk)) {
562 if (bt_sk(sk)->defer_setup) {
563 struct sock *parent = bt_sk(sk)->parent;
564 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
565 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
566 parent->sk_data_ready(parent, 0);
568 } else {
569 sk->sk_state = BT_CONFIG;
570 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
571 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
573 } else {
574 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
575 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
578 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
579 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
582 rsp.result != L2CAP_CR_SUCCESS) {
583 bh_unlock_sock(sk);
584 continue;
587 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
588 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
589 l2cap_build_conf_req(sk, buf), buf);
590 l2cap_pi(sk)->num_conf_req++;
593 bh_unlock_sock(sk);
596 read_unlock(&l->lock);
598 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
599 bh_lock_sock(tmp1->sk);
600 __l2cap_sock_close(tmp1->sk, ECONNRESET);
601 bh_unlock_sock(tmp1->sk);
602 list_del(&tmp1->list);
603 kfree(tmp1);
607 static void l2cap_conn_ready(struct l2cap_conn *conn)
609 struct l2cap_chan_list *l = &conn->chan_list;
610 struct sock *sk;
612 BT_DBG("conn %p", conn);
614 read_lock(&l->lock);
616 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
617 bh_lock_sock(sk);
619 if (sk->sk_type != SOCK_SEQPACKET &&
620 sk->sk_type != SOCK_STREAM) {
621 l2cap_sock_clear_timer(sk);
622 sk->sk_state = BT_CONNECTED;
623 sk->sk_state_change(sk);
624 } else if (sk->sk_state == BT_CONNECT)
625 l2cap_do_start(sk);
627 bh_unlock_sock(sk);
630 read_unlock(&l->lock);
633 /* Notify sockets that we cannot guaranty reliability anymore */
634 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
636 struct l2cap_chan_list *l = &conn->chan_list;
637 struct sock *sk;
639 BT_DBG("conn %p", conn);
641 read_lock(&l->lock);
643 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
644 if (l2cap_pi(sk)->force_reliable)
645 sk->sk_err = err;
648 read_unlock(&l->lock);
651 static void l2cap_info_timeout(unsigned long arg)
653 struct l2cap_conn *conn = (void *) arg;
655 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
656 conn->info_ident = 0;
658 l2cap_conn_start(conn);
661 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
663 struct l2cap_conn *conn = hcon->l2cap_data;
665 if (conn || status)
666 return conn;
668 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
669 if (!conn)
670 return NULL;
672 hcon->l2cap_data = conn;
673 conn->hcon = hcon;
675 BT_DBG("hcon %p conn %p", hcon, conn);
677 conn->mtu = hcon->hdev->acl_mtu;
678 conn->src = &hcon->hdev->bdaddr;
679 conn->dst = &hcon->dst;
681 conn->feat_mask = 0;
683 spin_lock_init(&conn->lock);
684 rwlock_init(&conn->chan_list.lock);
686 setup_timer(&conn->info_timer, l2cap_info_timeout,
687 (unsigned long) conn);
689 conn->disc_reason = 0x13;
691 return conn;
694 static void l2cap_conn_del(struct hci_conn *hcon, int err)
696 struct l2cap_conn *conn = hcon->l2cap_data;
697 struct sock *sk;
699 if (!conn)
700 return;
702 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
704 kfree_skb(conn->rx_skb);
706 /* Kill channels */
707 while ((sk = conn->chan_list.head)) {
708 bh_lock_sock(sk);
709 l2cap_chan_del(sk, err);
710 bh_unlock_sock(sk);
711 l2cap_sock_kill(sk);
714 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
715 del_timer_sync(&conn->info_timer);
717 hcon->l2cap_data = NULL;
718 kfree(conn);
721 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
723 struct l2cap_chan_list *l = &conn->chan_list;
724 write_lock_bh(&l->lock);
725 __l2cap_chan_add(conn, sk, parent);
726 write_unlock_bh(&l->lock);
729 /* ---- Socket interface ---- */
730 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
732 struct sock *sk;
733 struct hlist_node *node;
734 sk_for_each(sk, node, &l2cap_sk_list.head)
735 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
736 goto found;
737 sk = NULL;
738 found:
739 return sk;
742 /* Find socket with psm and source bdaddr.
743 * Returns closest match.
745 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
747 struct sock *sk = NULL, *sk1 = NULL;
748 struct hlist_node *node;
750 sk_for_each(sk, node, &l2cap_sk_list.head) {
751 if (state && sk->sk_state != state)
752 continue;
754 if (l2cap_pi(sk)->psm == psm) {
755 /* Exact match. */
756 if (!bacmp(&bt_sk(sk)->src, src))
757 break;
759 /* Closest match */
760 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
761 sk1 = sk;
764 return node ? sk : sk1;
767 /* Find socket with given address (psm, src).
768 * Returns locked socket */
769 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
771 struct sock *s;
772 read_lock(&l2cap_sk_list.lock);
773 s = __l2cap_get_sock_by_psm(state, psm, src);
774 if (s)
775 bh_lock_sock(s);
776 read_unlock(&l2cap_sk_list.lock);
777 return s;
780 static void l2cap_sock_destruct(struct sock *sk)
782 BT_DBG("sk %p", sk);
784 skb_queue_purge(&sk->sk_receive_queue);
785 skb_queue_purge(&sk->sk_write_queue);
788 static void l2cap_sock_cleanup_listen(struct sock *parent)
790 struct sock *sk;
792 BT_DBG("parent %p", parent);
794 /* Close not yet accepted channels */
795 while ((sk = bt_accept_dequeue(parent, NULL)))
796 l2cap_sock_close(sk);
798 parent->sk_state = BT_CLOSED;
799 sock_set_flag(parent, SOCK_ZAPPED);
802 /* Kill socket (only if zapped and orphan)
803 * Must be called on unlocked socket.
805 static void l2cap_sock_kill(struct sock *sk)
807 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
808 return;
810 BT_DBG("sk %p state %d", sk, sk->sk_state);
812 /* Kill poor orphan */
813 bt_sock_unlink(&l2cap_sk_list, sk);
814 sock_set_flag(sk, SOCK_DEAD);
815 sock_put(sk);
818 static void __l2cap_sock_close(struct sock *sk, int reason)
820 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
822 switch (sk->sk_state) {
823 case BT_LISTEN:
824 l2cap_sock_cleanup_listen(sk);
825 break;
827 case BT_CONNECTED:
828 case BT_CONFIG:
829 if (sk->sk_type == SOCK_SEQPACKET ||
830 sk->sk_type == SOCK_STREAM) {
831 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
833 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
834 l2cap_send_disconn_req(conn, sk, reason);
835 } else
836 l2cap_chan_del(sk, reason);
837 break;
839 case BT_CONNECT2:
840 if (sk->sk_type == SOCK_SEQPACKET ||
841 sk->sk_type == SOCK_STREAM) {
842 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
843 struct l2cap_conn_rsp rsp;
844 __u16 result;
846 if (bt_sk(sk)->defer_setup)
847 result = L2CAP_CR_SEC_BLOCK;
848 else
849 result = L2CAP_CR_BAD_PSM;
851 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
852 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
853 rsp.result = cpu_to_le16(result);
854 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
855 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
856 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
857 } else
858 l2cap_chan_del(sk, reason);
859 break;
861 case BT_CONNECT:
862 case BT_DISCONN:
863 l2cap_chan_del(sk, reason);
864 break;
866 default:
867 sock_set_flag(sk, SOCK_ZAPPED);
868 break;
872 /* Must be called on unlocked socket. */
873 static void l2cap_sock_close(struct sock *sk)
875 l2cap_sock_clear_timer(sk);
876 lock_sock(sk);
877 __l2cap_sock_close(sk, ECONNRESET);
878 release_sock(sk);
879 l2cap_sock_kill(sk);
882 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
884 struct l2cap_pinfo *pi = l2cap_pi(sk);
886 BT_DBG("sk %p", sk);
888 if (parent) {
889 sk->sk_type = parent->sk_type;
890 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
892 pi->imtu = l2cap_pi(parent)->imtu;
893 pi->omtu = l2cap_pi(parent)->omtu;
894 pi->conf_state = l2cap_pi(parent)->conf_state;
895 pi->mode = l2cap_pi(parent)->mode;
896 pi->fcs = l2cap_pi(parent)->fcs;
897 pi->max_tx = l2cap_pi(parent)->max_tx;
898 pi->tx_win = l2cap_pi(parent)->tx_win;
899 pi->sec_level = l2cap_pi(parent)->sec_level;
900 pi->role_switch = l2cap_pi(parent)->role_switch;
901 pi->force_reliable = l2cap_pi(parent)->force_reliable;
902 } else {
903 pi->imtu = L2CAP_DEFAULT_MTU;
904 pi->omtu = 0;
905 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
906 pi->mode = L2CAP_MODE_ERTM;
907 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
908 } else {
909 pi->mode = L2CAP_MODE_BASIC;
911 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
912 pi->fcs = L2CAP_FCS_CRC16;
913 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
914 pi->sec_level = BT_SECURITY_LOW;
915 pi->role_switch = 0;
916 pi->force_reliable = 0;
919 /* Default config options */
920 pi->conf_len = 0;
921 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
922 skb_queue_head_init(TX_QUEUE(sk));
923 skb_queue_head_init(SREJ_QUEUE(sk));
924 skb_queue_head_init(BUSY_QUEUE(sk));
925 INIT_LIST_HEAD(SREJ_LIST(sk));
928 static struct proto l2cap_proto = {
929 .name = "L2CAP",
930 .owner = THIS_MODULE,
931 .obj_size = sizeof(struct l2cap_pinfo)
934 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
936 struct sock *sk;
938 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
939 if (!sk)
940 return NULL;
942 sock_init_data(sock, sk);
943 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
945 sk->sk_destruct = l2cap_sock_destruct;
946 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
948 sock_reset_flag(sk, SOCK_ZAPPED);
950 sk->sk_protocol = proto;
951 sk->sk_state = BT_OPEN;
953 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
955 bt_sock_link(&l2cap_sk_list, sk);
956 return sk;
959 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
960 int kern)
962 struct sock *sk;
964 BT_DBG("sock %p", sock);
966 sock->state = SS_UNCONNECTED;
968 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
969 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
970 return -ESOCKTNOSUPPORT;
972 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
973 return -EPERM;
975 sock->ops = &l2cap_sock_ops;
977 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
978 if (!sk)
979 return -ENOMEM;
981 l2cap_sock_init(sk, NULL);
982 return 0;
985 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
987 struct sock *sk = sock->sk;
988 struct sockaddr_l2 la;
989 int len, err = 0;
991 BT_DBG("sk %p", sk);
993 if (!addr || addr->sa_family != AF_BLUETOOTH)
994 return -EINVAL;
996 memset(&la, 0, sizeof(la));
997 len = min_t(unsigned int, sizeof(la), alen);
998 memcpy(&la, addr, len);
1000 if (la.l2_cid)
1001 return -EINVAL;
1003 lock_sock(sk);
1005 if (sk->sk_state != BT_OPEN) {
1006 err = -EBADFD;
1007 goto done;
1010 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1011 !capable(CAP_NET_BIND_SERVICE)) {
1012 err = -EACCES;
1013 goto done;
1016 write_lock_bh(&l2cap_sk_list.lock);
1018 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1019 err = -EADDRINUSE;
1020 } else {
1021 /* Save source address */
1022 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1023 l2cap_pi(sk)->psm = la.l2_psm;
1024 l2cap_pi(sk)->sport = la.l2_psm;
1025 sk->sk_state = BT_BOUND;
1027 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1028 __le16_to_cpu(la.l2_psm) == 0x0003)
1029 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1032 write_unlock_bh(&l2cap_sk_list.lock);
1034 done:
1035 release_sock(sk);
1036 return err;
1039 static int l2cap_do_connect(struct sock *sk)
1041 bdaddr_t *src = &bt_sk(sk)->src;
1042 bdaddr_t *dst = &bt_sk(sk)->dst;
1043 struct l2cap_conn *conn;
1044 struct hci_conn *hcon;
1045 struct hci_dev *hdev;
1046 __u8 auth_type;
1047 int err;
1049 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1050 l2cap_pi(sk)->psm);
1052 hdev = hci_get_route(dst, src);
1053 if (!hdev)
1054 return -EHOSTUNREACH;
1056 hci_dev_lock_bh(hdev);
1058 err = -ENOMEM;
1060 if (sk->sk_type == SOCK_RAW) {
1061 switch (l2cap_pi(sk)->sec_level) {
1062 case BT_SECURITY_HIGH:
1063 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1064 break;
1065 case BT_SECURITY_MEDIUM:
1066 auth_type = HCI_AT_DEDICATED_BONDING;
1067 break;
1068 default:
1069 auth_type = HCI_AT_NO_BONDING;
1070 break;
1072 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1073 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1074 auth_type = HCI_AT_NO_BONDING_MITM;
1075 else
1076 auth_type = HCI_AT_NO_BONDING;
1078 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1079 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1080 } else {
1081 switch (l2cap_pi(sk)->sec_level) {
1082 case BT_SECURITY_HIGH:
1083 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1084 break;
1085 case BT_SECURITY_MEDIUM:
1086 auth_type = HCI_AT_GENERAL_BONDING;
1087 break;
1088 default:
1089 auth_type = HCI_AT_NO_BONDING;
1090 break;
1094 hcon = hci_connect(hdev, ACL_LINK, dst,
1095 l2cap_pi(sk)->sec_level, auth_type);
1096 if (!hcon)
1097 goto done;
1099 conn = l2cap_conn_add(hcon, 0);
1100 if (!conn) {
1101 hci_conn_put(hcon);
1102 goto done;
1105 err = 0;
1107 /* Update source addr of the socket */
1108 bacpy(src, conn->src);
1110 l2cap_chan_add(conn, sk, NULL);
1112 sk->sk_state = BT_CONNECT;
1113 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1115 if (hcon->state == BT_CONNECTED) {
1116 if (sk->sk_type != SOCK_SEQPACKET &&
1117 sk->sk_type != SOCK_STREAM) {
1118 l2cap_sock_clear_timer(sk);
1119 sk->sk_state = BT_CONNECTED;
1120 } else
1121 l2cap_do_start(sk);
1124 done:
1125 hci_dev_unlock_bh(hdev);
1126 hci_dev_put(hdev);
1127 return err;
1130 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1132 struct sock *sk = sock->sk;
1133 struct sockaddr_l2 la;
1134 int len, err = 0;
1136 BT_DBG("sk %p", sk);
1138 if (!addr || alen < sizeof(addr->sa_family) ||
1139 addr->sa_family != AF_BLUETOOTH)
1140 return -EINVAL;
1142 memset(&la, 0, sizeof(la));
1143 len = min_t(unsigned int, sizeof(la), alen);
1144 memcpy(&la, addr, len);
1146 if (la.l2_cid)
1147 return -EINVAL;
1149 lock_sock(sk);
1151 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1152 && !la.l2_psm) {
1153 err = -EINVAL;
1154 goto done;
1157 switch (l2cap_pi(sk)->mode) {
1158 case L2CAP_MODE_BASIC:
1159 break;
1160 case L2CAP_MODE_ERTM:
1161 case L2CAP_MODE_STREAMING:
1162 if (enable_ertm)
1163 break;
1164 /* fall through */
1165 default:
1166 err = -ENOTSUPP;
1167 goto done;
1170 switch (sk->sk_state) {
1171 case BT_CONNECT:
1172 case BT_CONNECT2:
1173 case BT_CONFIG:
1174 /* Already connecting */
1175 goto wait;
1177 case BT_CONNECTED:
1178 /* Already connected */
1179 err = -EISCONN;
1180 goto done;
1182 case BT_OPEN:
1183 case BT_BOUND:
1184 /* Can connect */
1185 break;
1187 default:
1188 err = -EBADFD;
1189 goto done;
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1194 l2cap_pi(sk)->psm = la.l2_psm;
1196 err = l2cap_do_connect(sk);
1197 if (err)
1198 goto done;
1200 wait:
1201 err = bt_sock_wait_state(sk, BT_CONNECTED,
1202 sock_sndtimeo(sk, flags & O_NONBLOCK));
1203 done:
1204 release_sock(sk);
1205 return err;
1208 static int l2cap_sock_listen(struct socket *sock, int backlog)
1210 struct sock *sk = sock->sk;
1211 int err = 0;
1213 BT_DBG("sk %p backlog %d", sk, backlog);
1215 lock_sock(sk);
1217 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1218 || sk->sk_state != BT_BOUND) {
1219 err = -EBADFD;
1220 goto done;
1223 switch (l2cap_pi(sk)->mode) {
1224 case L2CAP_MODE_BASIC:
1225 break;
1226 case L2CAP_MODE_ERTM:
1227 case L2CAP_MODE_STREAMING:
1228 if (enable_ertm)
1229 break;
1230 /* fall through */
1231 default:
1232 err = -ENOTSUPP;
1233 goto done;
1236 if (!l2cap_pi(sk)->psm) {
1237 bdaddr_t *src = &bt_sk(sk)->src;
1238 u16 psm;
1240 err = -EINVAL;
1242 write_lock_bh(&l2cap_sk_list.lock);
1244 for (psm = 0x1001; psm < 0x1100; psm += 2)
1245 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1246 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1247 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1248 err = 0;
1249 break;
1252 write_unlock_bh(&l2cap_sk_list.lock);
1254 if (err < 0)
1255 goto done;
1258 sk->sk_max_ack_backlog = backlog;
1259 sk->sk_ack_backlog = 0;
1260 sk->sk_state = BT_LISTEN;
1262 done:
1263 release_sock(sk);
1264 return err;
1267 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1269 DECLARE_WAITQUEUE(wait, current);
1270 struct sock *sk = sock->sk, *nsk;
1271 long timeo;
1272 int err = 0;
1274 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1276 if (sk->sk_state != BT_LISTEN) {
1277 err = -EBADFD;
1278 goto done;
1281 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1283 BT_DBG("sk %p timeo %ld", sk, timeo);
1285 /* Wait for an incoming connection. (wake-one). */
1286 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1287 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1288 set_current_state(TASK_INTERRUPTIBLE);
1289 if (!timeo) {
1290 err = -EAGAIN;
1291 break;
1294 release_sock(sk);
1295 timeo = schedule_timeout(timeo);
1296 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1298 if (sk->sk_state != BT_LISTEN) {
1299 err = -EBADFD;
1300 break;
1303 if (signal_pending(current)) {
1304 err = sock_intr_errno(timeo);
1305 break;
1308 set_current_state(TASK_RUNNING);
1309 remove_wait_queue(sk_sleep(sk), &wait);
1311 if (err)
1312 goto done;
1314 newsock->state = SS_CONNECTED;
1316 BT_DBG("new socket %p", nsk);
1318 done:
1319 release_sock(sk);
1320 return err;
1323 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1325 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1326 struct sock *sk = sock->sk;
1328 BT_DBG("sock %p, sk %p", sock, sk);
1330 addr->sa_family = AF_BLUETOOTH;
1331 *len = sizeof(struct sockaddr_l2);
1333 if (peer) {
1334 la->l2_psm = l2cap_pi(sk)->psm;
1335 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1336 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1337 } else {
1338 la->l2_psm = l2cap_pi(sk)->sport;
1339 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1340 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1343 return 0;
1346 static int __l2cap_wait_ack(struct sock *sk)
1348 DECLARE_WAITQUEUE(wait, current);
1349 int err = 0;
1350 int timeo = HZ/5;
1352 add_wait_queue(sk_sleep(sk), &wait);
1353 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1354 set_current_state(TASK_INTERRUPTIBLE);
1356 if (!timeo)
1357 timeo = HZ/5;
1359 if (signal_pending(current)) {
1360 err = sock_intr_errno(timeo);
1361 break;
1364 release_sock(sk);
1365 timeo = schedule_timeout(timeo);
1366 lock_sock(sk);
1368 err = sock_error(sk);
1369 if (err)
1370 break;
1372 set_current_state(TASK_RUNNING);
1373 remove_wait_queue(sk_sleep(sk), &wait);
1374 return err;
1377 static void l2cap_monitor_timeout(unsigned long arg)
1379 struct sock *sk = (void *) arg;
1381 BT_DBG("sk %p", sk);
1383 bh_lock_sock(sk);
1384 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1385 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1386 bh_unlock_sock(sk);
1387 return;
1390 l2cap_pi(sk)->retry_count++;
1391 __mod_monitor_timer();
1393 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1394 bh_unlock_sock(sk);
1397 static void l2cap_retrans_timeout(unsigned long arg)
1399 struct sock *sk = (void *) arg;
1401 BT_DBG("sk %p", sk);
1403 bh_lock_sock(sk);
1404 l2cap_pi(sk)->retry_count = 1;
1405 __mod_monitor_timer();
1407 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1409 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1410 bh_unlock_sock(sk);
1413 static void l2cap_drop_acked_frames(struct sock *sk)
1415 struct sk_buff *skb;
1417 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1418 l2cap_pi(sk)->unacked_frames) {
1419 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1420 break;
1422 skb = skb_dequeue(TX_QUEUE(sk));
1423 kfree_skb(skb);
1425 l2cap_pi(sk)->unacked_frames--;
1428 if (!l2cap_pi(sk)->unacked_frames)
1429 del_timer(&l2cap_pi(sk)->retrans_timer);
1432 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1434 struct l2cap_pinfo *pi = l2cap_pi(sk);
1436 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1438 hci_send_acl(pi->conn->hcon, skb, 0);
1441 static void l2cap_streaming_send(struct sock *sk)
1443 struct sk_buff *skb, *tx_skb;
1444 struct l2cap_pinfo *pi = l2cap_pi(sk);
1445 u16 control, fcs;
1447 while ((skb = sk->sk_send_head)) {
1448 tx_skb = skb_clone(skb, GFP_ATOMIC);
1450 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1451 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1452 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1454 if (pi->fcs == L2CAP_FCS_CRC16) {
1455 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1456 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1459 l2cap_do_send(sk, tx_skb);
1461 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1463 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1464 sk->sk_send_head = NULL;
1465 else
1466 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1468 skb = skb_dequeue(TX_QUEUE(sk));
1469 kfree_skb(skb);
1473 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1475 struct l2cap_pinfo *pi = l2cap_pi(sk);
1476 struct sk_buff *skb, *tx_skb;
1477 u16 control, fcs;
1479 skb = skb_peek(TX_QUEUE(sk));
1480 if (!skb)
1481 return;
1483 do {
1484 if (bt_cb(skb)->tx_seq == tx_seq)
1485 break;
1487 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1488 return;
1490 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1492 if (pi->remote_max_tx &&
1493 bt_cb(skb)->retries == pi->remote_max_tx) {
1494 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1495 return;
1498 tx_skb = skb_clone(skb, GFP_ATOMIC);
1499 bt_cb(skb)->retries++;
1500 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1502 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1503 control |= L2CAP_CTRL_FINAL;
1504 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1507 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1508 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1510 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1512 if (pi->fcs == L2CAP_FCS_CRC16) {
1513 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1514 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1517 l2cap_do_send(sk, tx_skb);
1520 static int l2cap_ertm_send(struct sock *sk)
1522 struct sk_buff *skb, *tx_skb;
1523 struct l2cap_pinfo *pi = l2cap_pi(sk);
1524 u16 control, fcs;
1525 int nsent = 0;
1527 if (sk->sk_state != BT_CONNECTED)
1528 return -ENOTCONN;
1530 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1532 if (pi->remote_max_tx &&
1533 bt_cb(skb)->retries == pi->remote_max_tx) {
1534 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1535 break;
1538 tx_skb = skb_clone(skb, GFP_ATOMIC);
1540 bt_cb(skb)->retries++;
1542 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1543 control &= L2CAP_CTRL_SAR;
1545 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1546 control |= L2CAP_CTRL_FINAL;
1547 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1549 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1550 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1551 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1554 if (pi->fcs == L2CAP_FCS_CRC16) {
1555 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1556 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1559 l2cap_do_send(sk, tx_skb);
1561 __mod_retrans_timer();
1563 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1564 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1566 pi->unacked_frames++;
1567 pi->frames_sent++;
1569 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1570 sk->sk_send_head = NULL;
1571 else
1572 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1574 nsent++;
1577 return nsent;
1580 static int l2cap_retransmit_frames(struct sock *sk)
1582 struct l2cap_pinfo *pi = l2cap_pi(sk);
1583 int ret;
1585 if (!skb_queue_empty(TX_QUEUE(sk)))
1586 sk->sk_send_head = TX_QUEUE(sk)->next;
1588 pi->next_tx_seq = pi->expected_ack_seq;
1589 ret = l2cap_ertm_send(sk);
1590 return ret;
1593 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1595 struct sock *sk = (struct sock *)pi;
1596 u16 control = 0;
1598 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1600 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1601 control |= L2CAP_SUPER_RCV_NOT_READY;
1602 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1603 l2cap_send_sframe(pi, control);
1604 return;
1607 if (l2cap_ertm_send(sk) > 0)
1608 return;
1610 control |= L2CAP_SUPER_RCV_READY;
1611 l2cap_send_sframe(pi, control);
1614 static void l2cap_send_srejtail(struct sock *sk)
1616 struct srej_list *tail;
1617 u16 control;
1619 control = L2CAP_SUPER_SELECT_REJECT;
1620 control |= L2CAP_CTRL_FINAL;
1622 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1623 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1625 l2cap_send_sframe(l2cap_pi(sk), control);
1628 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1630 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1631 struct sk_buff **frag;
1632 int err, sent = 0;
1634 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1635 return -EFAULT;
1637 sent += count;
1638 len -= count;
1640 /* Continuation fragments (no L2CAP header) */
1641 frag = &skb_shinfo(skb)->frag_list;
1642 while (len) {
1643 count = min_t(unsigned int, conn->mtu, len);
1645 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1646 if (!*frag)
1647 return -EFAULT;
1648 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1649 return -EFAULT;
1651 sent += count;
1652 len -= count;
1654 frag = &(*frag)->next;
1657 return sent;
1660 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1662 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1663 struct sk_buff *skb;
1664 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1665 struct l2cap_hdr *lh;
1667 BT_DBG("sk %p len %d", sk, (int)len);
1669 count = min_t(unsigned int, (conn->mtu - hlen), len);
1670 skb = bt_skb_send_alloc(sk, count + hlen,
1671 msg->msg_flags & MSG_DONTWAIT, &err);
1672 if (!skb)
1673 return ERR_PTR(-ENOMEM);
1675 /* Create L2CAP header */
1676 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1677 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1678 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1679 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1681 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1682 if (unlikely(err < 0)) {
1683 kfree_skb(skb);
1684 return ERR_PTR(err);
1686 return skb;
1689 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1691 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1692 struct sk_buff *skb;
1693 int err, count, hlen = L2CAP_HDR_SIZE;
1694 struct l2cap_hdr *lh;
1696 BT_DBG("sk %p len %d", sk, (int)len);
1698 count = min_t(unsigned int, (conn->mtu - hlen), len);
1699 skb = bt_skb_send_alloc(sk, count + hlen,
1700 msg->msg_flags & MSG_DONTWAIT, &err);
1701 if (!skb)
1702 return ERR_PTR(-ENOMEM);
1704 /* Create L2CAP header */
1705 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1706 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1707 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1709 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1710 if (unlikely(err < 0)) {
1711 kfree_skb(skb);
1712 return ERR_PTR(err);
1714 return skb;
1717 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1719 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1720 struct sk_buff *skb;
1721 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1722 struct l2cap_hdr *lh;
1724 BT_DBG("sk %p len %d", sk, (int)len);
1726 if (!conn)
1727 return ERR_PTR(-ENOTCONN);
1729 if (sdulen)
1730 hlen += 2;
1732 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1733 hlen += 2;
1735 count = min_t(unsigned int, (conn->mtu - hlen), len);
1736 skb = bt_skb_send_alloc(sk, count + hlen,
1737 msg->msg_flags & MSG_DONTWAIT, &err);
1738 if (!skb)
1739 return ERR_PTR(-ENOMEM);
1741 /* Create L2CAP header */
1742 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1743 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1744 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1745 put_unaligned_le16(control, skb_put(skb, 2));
1746 if (sdulen)
1747 put_unaligned_le16(sdulen, skb_put(skb, 2));
1749 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1750 if (unlikely(err < 0)) {
1751 kfree_skb(skb);
1752 return ERR_PTR(err);
1755 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1756 put_unaligned_le16(0, skb_put(skb, 2));
1758 bt_cb(skb)->retries = 0;
1759 return skb;
1762 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1764 struct l2cap_pinfo *pi = l2cap_pi(sk);
1765 struct sk_buff *skb;
1766 struct sk_buff_head sar_queue;
1767 u16 control;
1768 size_t size = 0;
1770 skb_queue_head_init(&sar_queue);
1771 control = L2CAP_SDU_START;
1772 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1773 if (IS_ERR(skb))
1774 return PTR_ERR(skb);
1776 __skb_queue_tail(&sar_queue, skb);
1777 len -= pi->remote_mps;
1778 size += pi->remote_mps;
1780 while (len > 0) {
1781 size_t buflen;
1783 if (len > pi->remote_mps) {
1784 control = L2CAP_SDU_CONTINUE;
1785 buflen = pi->remote_mps;
1786 } else {
1787 control = L2CAP_SDU_END;
1788 buflen = len;
1791 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1792 if (IS_ERR(skb)) {
1793 skb_queue_purge(&sar_queue);
1794 return PTR_ERR(skb);
1797 __skb_queue_tail(&sar_queue, skb);
1798 len -= buflen;
1799 size += buflen;
1801 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1802 if (sk->sk_send_head == NULL)
1803 sk->sk_send_head = sar_queue.next;
1805 return size;
1808 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1810 struct sock *sk = sock->sk;
1811 struct l2cap_pinfo *pi = l2cap_pi(sk);
1812 struct sk_buff *skb;
1813 u16 control;
1814 int err;
1816 BT_DBG("sock %p, sk %p", sock, sk);
1818 err = sock_error(sk);
1819 if (err)
1820 return err;
1822 if (msg->msg_flags & MSG_OOB)
1823 return -EOPNOTSUPP;
1825 lock_sock(sk);
1827 if (sk->sk_state != BT_CONNECTED) {
1828 err = -ENOTCONN;
1829 goto done;
1832 /* Connectionless channel */
1833 if (sk->sk_type == SOCK_DGRAM) {
1834 skb = l2cap_create_connless_pdu(sk, msg, len);
1835 if (IS_ERR(skb)) {
1836 err = PTR_ERR(skb);
1837 } else {
1838 l2cap_do_send(sk, skb);
1839 err = len;
1841 goto done;
1844 switch (pi->mode) {
1845 case L2CAP_MODE_BASIC:
1846 /* Check outgoing MTU */
1847 if (len > pi->omtu) {
1848 err = -EMSGSIZE;
1849 goto done;
1852 /* Create a basic PDU */
1853 skb = l2cap_create_basic_pdu(sk, msg, len);
1854 if (IS_ERR(skb)) {
1855 err = PTR_ERR(skb);
1856 goto done;
1859 l2cap_do_send(sk, skb);
1860 err = len;
1861 break;
1863 case L2CAP_MODE_ERTM:
1864 case L2CAP_MODE_STREAMING:
1865 /* Entire SDU fits into one PDU */
1866 if (len <= pi->remote_mps) {
1867 control = L2CAP_SDU_UNSEGMENTED;
1868 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1869 if (IS_ERR(skb)) {
1870 err = PTR_ERR(skb);
1871 goto done;
1873 __skb_queue_tail(TX_QUEUE(sk), skb);
1875 if (sk->sk_send_head == NULL)
1876 sk->sk_send_head = skb;
1878 } else {
1879 /* Segment SDU into multiples PDUs */
1880 err = l2cap_sar_segment_sdu(sk, msg, len);
1881 if (err < 0)
1882 goto done;
1885 if (pi->mode == L2CAP_MODE_STREAMING) {
1886 l2cap_streaming_send(sk);
1887 } else {
1888 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1889 pi->conn_state && L2CAP_CONN_WAIT_F) {
1890 err = len;
1891 break;
1893 err = l2cap_ertm_send(sk);
1896 if (err >= 0)
1897 err = len;
1898 break;
1900 default:
1901 BT_DBG("bad state %1.1x", pi->mode);
1902 err = -EBADFD;
1905 done:
1906 release_sock(sk);
1907 return err;
1910 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1912 struct sock *sk = sock->sk;
1914 lock_sock(sk);
1916 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1917 struct l2cap_conn_rsp rsp;
1918 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1919 u8 buf[128];
1921 sk->sk_state = BT_CONFIG;
1923 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1924 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1925 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1926 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1927 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1928 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1930 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1931 release_sock(sk);
1932 return 0;
1935 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1936 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1937 l2cap_build_conf_req(sk, buf), buf);
1938 l2cap_pi(sk)->num_conf_req++;
1940 release_sock(sk);
1941 return 0;
1944 release_sock(sk);
1946 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1949 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1951 struct sock *sk = sock->sk;
1952 struct l2cap_options opts;
1953 int len, err = 0;
1954 u32 opt;
1956 BT_DBG("sk %p", sk);
1958 lock_sock(sk);
1960 switch (optname) {
1961 case L2CAP_OPTIONS:
1962 opts.imtu = l2cap_pi(sk)->imtu;
1963 opts.omtu = l2cap_pi(sk)->omtu;
1964 opts.flush_to = l2cap_pi(sk)->flush_to;
1965 opts.mode = l2cap_pi(sk)->mode;
1966 opts.fcs = l2cap_pi(sk)->fcs;
1967 opts.max_tx = l2cap_pi(sk)->max_tx;
1968 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1970 len = min_t(unsigned int, sizeof(opts), optlen);
1971 if (copy_from_user((char *) &opts, optval, len)) {
1972 err = -EFAULT;
1973 break;
1976 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1977 err = -EINVAL;
1978 break;
1981 l2cap_pi(sk)->mode = opts.mode;
1982 switch (l2cap_pi(sk)->mode) {
1983 case L2CAP_MODE_BASIC:
1984 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1985 break;
1986 case L2CAP_MODE_ERTM:
1987 case L2CAP_MODE_STREAMING:
1988 if (enable_ertm)
1989 break;
1990 /* fall through */
1991 default:
1992 err = -EINVAL;
1993 break;
1996 l2cap_pi(sk)->imtu = opts.imtu;
1997 l2cap_pi(sk)->omtu = opts.omtu;
1998 l2cap_pi(sk)->fcs = opts.fcs;
1999 l2cap_pi(sk)->max_tx = opts.max_tx;
2000 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2001 break;
2003 case L2CAP_LM:
2004 if (get_user(opt, (u32 __user *) optval)) {
2005 err = -EFAULT;
2006 break;
2009 if (opt & L2CAP_LM_AUTH)
2010 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2011 if (opt & L2CAP_LM_ENCRYPT)
2012 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2013 if (opt & L2CAP_LM_SECURE)
2014 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2016 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2017 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2018 break;
2020 default:
2021 err = -ENOPROTOOPT;
2022 break;
2025 release_sock(sk);
2026 return err;
2029 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2031 struct sock *sk = sock->sk;
2032 struct bt_security sec;
2033 int len, err = 0;
2034 u32 opt;
2036 BT_DBG("sk %p", sk);
2038 if (level == SOL_L2CAP)
2039 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2041 if (level != SOL_BLUETOOTH)
2042 return -ENOPROTOOPT;
2044 lock_sock(sk);
2046 switch (optname) {
2047 case BT_SECURITY:
2048 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2049 && sk->sk_type != SOCK_RAW) {
2050 err = -EINVAL;
2051 break;
2054 sec.level = BT_SECURITY_LOW;
2056 len = min_t(unsigned int, sizeof(sec), optlen);
2057 if (copy_from_user((char *) &sec, optval, len)) {
2058 err = -EFAULT;
2059 break;
2062 if (sec.level < BT_SECURITY_LOW ||
2063 sec.level > BT_SECURITY_HIGH) {
2064 err = -EINVAL;
2065 break;
2068 l2cap_pi(sk)->sec_level = sec.level;
2069 break;
2071 case BT_DEFER_SETUP:
2072 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2073 err = -EINVAL;
2074 break;
2077 if (get_user(opt, (u32 __user *) optval)) {
2078 err = -EFAULT;
2079 break;
2082 bt_sk(sk)->defer_setup = opt;
2083 break;
2085 default:
2086 err = -ENOPROTOOPT;
2087 break;
2090 release_sock(sk);
2091 return err;
2094 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2096 struct sock *sk = sock->sk;
2097 struct l2cap_options opts;
2098 struct l2cap_conninfo cinfo;
2099 int len, err = 0;
2100 u32 opt;
2102 BT_DBG("sk %p", sk);
2104 if (get_user(len, optlen))
2105 return -EFAULT;
2107 lock_sock(sk);
2109 switch (optname) {
2110 case L2CAP_OPTIONS:
2111 opts.imtu = l2cap_pi(sk)->imtu;
2112 opts.omtu = l2cap_pi(sk)->omtu;
2113 opts.flush_to = l2cap_pi(sk)->flush_to;
2114 opts.mode = l2cap_pi(sk)->mode;
2115 opts.fcs = l2cap_pi(sk)->fcs;
2116 opts.max_tx = l2cap_pi(sk)->max_tx;
2117 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2119 len = min_t(unsigned int, len, sizeof(opts));
2120 if (copy_to_user(optval, (char *) &opts, len))
2121 err = -EFAULT;
2123 break;
2125 case L2CAP_LM:
2126 switch (l2cap_pi(sk)->sec_level) {
2127 case BT_SECURITY_LOW:
2128 opt = L2CAP_LM_AUTH;
2129 break;
2130 case BT_SECURITY_MEDIUM:
2131 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2132 break;
2133 case BT_SECURITY_HIGH:
2134 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2135 L2CAP_LM_SECURE;
2136 break;
2137 default:
2138 opt = 0;
2139 break;
2142 if (l2cap_pi(sk)->role_switch)
2143 opt |= L2CAP_LM_MASTER;
2145 if (l2cap_pi(sk)->force_reliable)
2146 opt |= L2CAP_LM_RELIABLE;
2148 if (put_user(opt, (u32 __user *) optval))
2149 err = -EFAULT;
2150 break;
2152 case L2CAP_CONNINFO:
2153 if (sk->sk_state != BT_CONNECTED &&
2154 !(sk->sk_state == BT_CONNECT2 &&
2155 bt_sk(sk)->defer_setup)) {
2156 err = -ENOTCONN;
2157 break;
2160 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2161 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2163 len = min_t(unsigned int, len, sizeof(cinfo));
2164 if (copy_to_user(optval, (char *) &cinfo, len))
2165 err = -EFAULT;
2167 break;
2169 default:
2170 err = -ENOPROTOOPT;
2171 break;
2174 release_sock(sk);
2175 return err;
2178 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2180 struct sock *sk = sock->sk;
2181 struct bt_security sec;
2182 int len, err = 0;
2184 BT_DBG("sk %p", sk);
2186 if (level == SOL_L2CAP)
2187 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2189 if (level != SOL_BLUETOOTH)
2190 return -ENOPROTOOPT;
2192 if (get_user(len, optlen))
2193 return -EFAULT;
2195 lock_sock(sk);
2197 switch (optname) {
2198 case BT_SECURITY:
2199 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2200 && sk->sk_type != SOCK_RAW) {
2201 err = -EINVAL;
2202 break;
2205 sec.level = l2cap_pi(sk)->sec_level;
2207 len = min_t(unsigned int, len, sizeof(sec));
2208 if (copy_to_user(optval, (char *) &sec, len))
2209 err = -EFAULT;
2211 break;
2213 case BT_DEFER_SETUP:
2214 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2215 err = -EINVAL;
2216 break;
2219 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2220 err = -EFAULT;
2222 break;
2224 default:
2225 err = -ENOPROTOOPT;
2226 break;
2229 release_sock(sk);
2230 return err;
2233 static int l2cap_sock_shutdown(struct socket *sock, int how)
2235 struct sock *sk = sock->sk;
2236 int err = 0;
2238 BT_DBG("sock %p, sk %p", sock, sk);
2240 if (!sk)
2241 return 0;
2243 lock_sock(sk);
2244 if (!sk->sk_shutdown) {
2245 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2246 err = __l2cap_wait_ack(sk);
2248 sk->sk_shutdown = SHUTDOWN_MASK;
2249 l2cap_sock_clear_timer(sk);
2250 __l2cap_sock_close(sk, 0);
2252 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2253 err = bt_sock_wait_state(sk, BT_CLOSED,
2254 sk->sk_lingertime);
2257 if (!err && sk->sk_err)
2258 err = -sk->sk_err;
2260 release_sock(sk);
2261 return err;
2264 static int l2cap_sock_release(struct socket *sock)
2266 struct sock *sk = sock->sk;
2267 int err;
2269 BT_DBG("sock %p, sk %p", sock, sk);
2271 if (!sk)
2272 return 0;
2274 err = l2cap_sock_shutdown(sock, 2);
2276 sock_orphan(sk);
2277 l2cap_sock_kill(sk);
2278 return err;
2281 static void l2cap_chan_ready(struct sock *sk)
2283 struct sock *parent = bt_sk(sk)->parent;
2285 BT_DBG("sk %p, parent %p", sk, parent);
2287 l2cap_pi(sk)->conf_state = 0;
2288 l2cap_sock_clear_timer(sk);
2290 if (!parent) {
2291 /* Outgoing channel.
2292 * Wake up socket sleeping on connect.
2294 sk->sk_state = BT_CONNECTED;
2295 sk->sk_state_change(sk);
2296 } else {
2297 /* Incoming channel.
2298 * Wake up socket sleeping on accept.
2300 parent->sk_data_ready(parent, 0);
2304 /* Copy frame to all raw sockets on that connection */
2305 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2307 struct l2cap_chan_list *l = &conn->chan_list;
2308 struct sk_buff *nskb;
2309 struct sock *sk;
2311 BT_DBG("conn %p", conn);
2313 read_lock(&l->lock);
2314 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2315 if (sk->sk_type != SOCK_RAW)
2316 continue;
2318 /* Don't send frame to the socket it came from */
2319 if (skb->sk == sk)
2320 continue;
2321 nskb = skb_clone(skb, GFP_ATOMIC);
2322 if (!nskb)
2323 continue;
2325 if (sock_queue_rcv_skb(sk, nskb))
2326 kfree_skb(nskb);
2328 read_unlock(&l->lock);
2331 /* ---- L2CAP signalling commands ---- */
2332 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2333 u8 code, u8 ident, u16 dlen, void *data)
2335 struct sk_buff *skb, **frag;
2336 struct l2cap_cmd_hdr *cmd;
2337 struct l2cap_hdr *lh;
2338 int len, count;
2340 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2341 conn, code, ident, dlen);
2343 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2344 count = min_t(unsigned int, conn->mtu, len);
2346 skb = bt_skb_alloc(count, GFP_ATOMIC);
2347 if (!skb)
2348 return NULL;
2350 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2351 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2352 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2354 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2355 cmd->code = code;
2356 cmd->ident = ident;
2357 cmd->len = cpu_to_le16(dlen);
2359 if (dlen) {
2360 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2361 memcpy(skb_put(skb, count), data, count);
2362 data += count;
2365 len -= skb->len;
2367 /* Continuation fragments (no L2CAP header) */
2368 frag = &skb_shinfo(skb)->frag_list;
2369 while (len) {
2370 count = min_t(unsigned int, conn->mtu, len);
2372 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2373 if (!*frag)
2374 goto fail;
2376 memcpy(skb_put(*frag, count), data, count);
2378 len -= count;
2379 data += count;
2381 frag = &(*frag)->next;
2384 return skb;
2386 fail:
2387 kfree_skb(skb);
2388 return NULL;
2391 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2393 struct l2cap_conf_opt *opt = *ptr;
2394 int len;
2396 len = L2CAP_CONF_OPT_SIZE + opt->len;
2397 *ptr += len;
2399 *type = opt->type;
2400 *olen = opt->len;
2402 switch (opt->len) {
2403 case 1:
2404 *val = *((u8 *) opt->val);
2405 break;
2407 case 2:
2408 *val = __le16_to_cpu(*((__le16 *) opt->val));
2409 break;
2411 case 4:
2412 *val = __le32_to_cpu(*((__le32 *) opt->val));
2413 break;
2415 default:
2416 *val = (unsigned long) opt->val;
2417 break;
2420 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2421 return len;
2424 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2426 struct l2cap_conf_opt *opt = *ptr;
2428 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2430 opt->type = type;
2431 opt->len = len;
2433 switch (len) {
2434 case 1:
2435 *((u8 *) opt->val) = val;
2436 break;
2438 case 2:
2439 *((__le16 *) opt->val) = cpu_to_le16(val);
2440 break;
2442 case 4:
2443 *((__le32 *) opt->val) = cpu_to_le32(val);
2444 break;
2446 default:
2447 memcpy(opt->val, (void *) val, len);
2448 break;
2451 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer: acknowledge received frames that have not been acked yet. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2463 static inline void l2cap_ertm_init(struct sock *sk)
2465 l2cap_pi(sk)->expected_ack_seq = 0;
2466 l2cap_pi(sk)->unacked_frames = 0;
2467 l2cap_pi(sk)->buffer_seq = 0;
2468 l2cap_pi(sk)->num_acked = 0;
2469 l2cap_pi(sk)->frames_sent = 0;
2471 setup_timer(&l2cap_pi(sk)->retrans_timer,
2472 l2cap_retrans_timeout, (unsigned long) sk);
2473 setup_timer(&l2cap_pi(sk)->monitor_timer,
2474 l2cap_monitor_timeout, (unsigned long) sk);
2475 setup_timer(&l2cap_pi(sk)->ack_timer,
2476 l2cap_ack_timeout, (unsigned long) sk);
2478 __skb_queue_head_init(SREJ_QUEUE(sk));
2479 __skb_queue_head_init(BUSY_QUEUE(sk));
2481 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2483 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2486 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2488 switch (mode) {
2489 case L2CAP_MODE_STREAMING:
2490 case L2CAP_MODE_ERTM:
2491 if (l2cap_mode_supported(mode, remote_feat_mask))
2492 return mode;
2493 /* fall through */
2494 default:
2495 return L2CAP_MODE_BASIC;
2499 static int l2cap_build_conf_req(struct sock *sk, void *data)
2501 struct l2cap_pinfo *pi = l2cap_pi(sk);
2502 struct l2cap_conf_req *req = data;
2503 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2504 void *ptr = req->data;
2506 BT_DBG("sk %p", sk);
2508 if (pi->num_conf_req || pi->num_conf_rsp)
2509 goto done;
2511 switch (pi->mode) {
2512 case L2CAP_MODE_STREAMING:
2513 case L2CAP_MODE_ERTM:
2514 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2515 break;
2517 /* fall through */
2518 default:
2519 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2520 break;
2523 done:
2524 switch (pi->mode) {
2525 case L2CAP_MODE_BASIC:
2526 if (pi->imtu != L2CAP_DEFAULT_MTU)
2527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2529 rfc.mode = L2CAP_MODE_BASIC;
2530 rfc.txwin_size = 0;
2531 rfc.max_transmit = 0;
2532 rfc.retrans_timeout = 0;
2533 rfc.monitor_timeout = 0;
2534 rfc.max_pdu_size = 0;
2536 break;
2538 case L2CAP_MODE_ERTM:
2539 rfc.mode = L2CAP_MODE_ERTM;
2540 rfc.txwin_size = pi->tx_win;
2541 rfc.max_transmit = pi->max_tx;
2542 rfc.retrans_timeout = 0;
2543 rfc.monitor_timeout = 0;
2544 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2545 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2546 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2548 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2549 break;
2551 if (pi->fcs == L2CAP_FCS_NONE ||
2552 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2553 pi->fcs = L2CAP_FCS_NONE;
2554 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2556 break;
2558 case L2CAP_MODE_STREAMING:
2559 rfc.mode = L2CAP_MODE_STREAMING;
2560 rfc.txwin_size = 0;
2561 rfc.max_transmit = 0;
2562 rfc.retrans_timeout = 0;
2563 rfc.monitor_timeout = 0;
2564 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2565 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2566 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2568 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2569 break;
2571 if (pi->fcs == L2CAP_FCS_NONE ||
2572 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2573 pi->fcs = L2CAP_FCS_NONE;
2574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2576 break;
2579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2580 (unsigned long) &rfc);
2582 /* FIXME: Need actual value of the flush timeout */
2583 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2584 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2586 req->dcid = cpu_to_le16(pi->dcid);
2587 req->flags = cpu_to_le16(0);
2589 return ptr - data;
2592 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2594 struct l2cap_pinfo *pi = l2cap_pi(sk);
2595 struct l2cap_conf_rsp *rsp = data;
2596 void *ptr = rsp->data;
2597 void *req = pi->conf_req;
2598 int len = pi->conf_len;
2599 int type, hint, olen;
2600 unsigned long val;
2601 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2602 u16 mtu = L2CAP_DEFAULT_MTU;
2603 u16 result = L2CAP_CONF_SUCCESS;
2605 BT_DBG("sk %p", sk);
2607 while (len >= L2CAP_CONF_OPT_SIZE) {
2608 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2610 hint = type & L2CAP_CONF_HINT;
2611 type &= L2CAP_CONF_MASK;
2613 switch (type) {
2614 case L2CAP_CONF_MTU:
2615 mtu = val;
2616 break;
2618 case L2CAP_CONF_FLUSH_TO:
2619 pi->flush_to = val;
2620 break;
2622 case L2CAP_CONF_QOS:
2623 break;
2625 case L2CAP_CONF_RFC:
2626 if (olen == sizeof(rfc))
2627 memcpy(&rfc, (void *) val, olen);
2628 break;
2630 case L2CAP_CONF_FCS:
2631 if (val == L2CAP_FCS_NONE)
2632 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2634 break;
2636 default:
2637 if (hint)
2638 break;
2640 result = L2CAP_CONF_UNKNOWN;
2641 *((u8 *) ptr++) = type;
2642 break;
2646 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2647 goto done;
2649 switch (pi->mode) {
2650 case L2CAP_MODE_STREAMING:
2651 case L2CAP_MODE_ERTM:
2652 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2653 pi->mode = l2cap_select_mode(rfc.mode,
2654 pi->conn->feat_mask);
2655 break;
2658 if (pi->mode != rfc.mode)
2659 return -ECONNREFUSED;
2661 break;
2664 done:
2665 if (pi->mode != rfc.mode) {
2666 result = L2CAP_CONF_UNACCEPT;
2667 rfc.mode = pi->mode;
2669 if (pi->num_conf_rsp == 1)
2670 return -ECONNREFUSED;
2672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2673 sizeof(rfc), (unsigned long) &rfc);
2677 if (result == L2CAP_CONF_SUCCESS) {
2678 /* Configure output options and let the other side know
2679 * which ones we don't like. */
2681 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2682 result = L2CAP_CONF_UNACCEPT;
2683 else {
2684 pi->omtu = mtu;
2685 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2687 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2689 switch (rfc.mode) {
2690 case L2CAP_MODE_BASIC:
2691 pi->fcs = L2CAP_FCS_NONE;
2692 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2693 break;
2695 case L2CAP_MODE_ERTM:
2696 pi->remote_tx_win = rfc.txwin_size;
2697 pi->remote_max_tx = rfc.max_transmit;
2698 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2699 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2701 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2703 rfc.retrans_timeout =
2704 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2705 rfc.monitor_timeout =
2706 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2708 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2710 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2711 sizeof(rfc), (unsigned long) &rfc);
2713 break;
2715 case L2CAP_MODE_STREAMING:
2716 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2717 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2719 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2721 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2723 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2724 sizeof(rfc), (unsigned long) &rfc);
2726 break;
2728 default:
2729 result = L2CAP_CONF_UNACCEPT;
2731 memset(&rfc, 0, sizeof(rfc));
2732 rfc.mode = pi->mode;
2735 if (result == L2CAP_CONF_SUCCESS)
2736 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2738 rsp->scid = cpu_to_le16(pi->dcid);
2739 rsp->result = cpu_to_le16(result);
2740 rsp->flags = cpu_to_le16(0x0000);
2742 return ptr - data;
2745 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2747 struct l2cap_pinfo *pi = l2cap_pi(sk);
2748 struct l2cap_conf_req *req = data;
2749 void *ptr = req->data;
2750 int type, olen;
2751 unsigned long val;
2752 struct l2cap_conf_rfc rfc;
2754 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2756 while (len >= L2CAP_CONF_OPT_SIZE) {
2757 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2759 switch (type) {
2760 case L2CAP_CONF_MTU:
2761 if (val < L2CAP_DEFAULT_MIN_MTU) {
2762 *result = L2CAP_CONF_UNACCEPT;
2763 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2764 } else
2765 pi->omtu = val;
2766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2767 break;
2769 case L2CAP_CONF_FLUSH_TO:
2770 pi->flush_to = val;
2771 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2772 2, pi->flush_to);
2773 break;
2775 case L2CAP_CONF_RFC:
2776 if (olen == sizeof(rfc))
2777 memcpy(&rfc, (void *)val, olen);
2779 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2780 rfc.mode != pi->mode)
2781 return -ECONNREFUSED;
2783 pi->fcs = 0;
2785 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2786 sizeof(rfc), (unsigned long) &rfc);
2787 break;
2791 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2792 return -ECONNREFUSED;
2794 pi->mode = rfc.mode;
2796 if (*result == L2CAP_CONF_SUCCESS) {
2797 switch (rfc.mode) {
2798 case L2CAP_MODE_ERTM:
2799 pi->remote_tx_win = rfc.txwin_size;
2800 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2801 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2802 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2803 break;
2804 case L2CAP_MODE_STREAMING:
2805 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2809 req->dcid = cpu_to_le16(pi->dcid);
2810 req->flags = cpu_to_le16(0x0000);
2812 return ptr - data;
2815 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2817 struct l2cap_conf_rsp *rsp = data;
2818 void *ptr = rsp->data;
2820 BT_DBG("sk %p", sk);
2822 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2823 rsp->result = cpu_to_le16(result);
2824 rsp->flags = cpu_to_le16(flags);
2826 return ptr - data;
2829 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2831 struct l2cap_pinfo *pi = l2cap_pi(sk);
2832 int type, olen;
2833 unsigned long val;
2834 struct l2cap_conf_rfc rfc;
2836 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2838 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2839 return;
2841 while (len >= L2CAP_CONF_OPT_SIZE) {
2842 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2844 switch (type) {
2845 case L2CAP_CONF_RFC:
2846 if (olen == sizeof(rfc))
2847 memcpy(&rfc, (void *)val, olen);
2848 goto done;
2852 done:
2853 switch (rfc.mode) {
2854 case L2CAP_MODE_ERTM:
2855 pi->remote_tx_win = rfc.txwin_size;
2856 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2857 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2858 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2859 break;
2860 case L2CAP_MODE_STREAMING:
2861 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2865 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2867 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2869 if (rej->reason != 0x0000)
2870 return 0;
2872 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2873 cmd->ident == conn->info_ident) {
2874 del_timer(&conn->info_timer);
2876 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2877 conn->info_ident = 0;
2879 l2cap_conn_start(conn);
2882 return 0;
/* Handle an incoming L2CAP Connection Request: find a listening socket for
 * the requested PSM, run security checks, allocate a child socket/channel,
 * and answer with a Connection Response (success, pending, or an error).
 * On a pending result with no info exchange done yet, it also kicks off the
 * feature-mask Information Request. Always returns 0.
 * Locking: l2cap_get_sock_by_psm() returns @parent locked (released at the
 * "response" label); the channel list lock covers child setup. */
2885 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2887 struct l2cap_chan_list *list = &conn->chan_list;
2888 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2889 struct l2cap_conn_rsp rsp;
2890 struct sock *parent, *uninitialized_var(sk);
2891 int result, status = L2CAP_CS_NO_INFO;
2893 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2894 __le16 psm = req->psm;
2896 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2898 /* Check if we have socket listening on psm */
2899 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2900 if (!parent) {
2901 result = L2CAP_CR_BAD_PSM;
2902 goto sendresp;
2905 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check;
 * 0x05 below is the HCI "authentication failure" disconnect reason. */
2906 if (psm != cpu_to_le16(0x0001) &&
2907 !hci_conn_check_link_mode(conn->hcon)) {
2908 conn->disc_reason = 0x05;
2909 result = L2CAP_CR_SEC_BLOCK;
2910 goto response;
/* Default result from here on: any allocation failure reports "no memory". */
2913 result = L2CAP_CR_NO_MEM;
2915 /* Check for backlog size */
2916 if (sk_acceptq_is_full(parent)) {
2917 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2918 goto response;
2921 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2922 if (!sk)
2923 goto response;
2925 write_lock_bh(&list->lock);
2927 /* Check if we already have channel with that dcid */
2928 if (__l2cap_get_chan_by_dcid(list, scid)) {
2929 write_unlock_bh(&list->lock);
2930 sock_set_flag(sk, SOCK_ZAPPED);
2931 l2cap_sock_kill(sk);
2932 goto response;
2935 hci_conn_hold(conn->hcon);
/* Initialise the child from the listening parent and bind it to this link. */
2937 l2cap_sock_init(sk, parent);
2938 bacpy(&bt_sk(sk)->src, conn->src);
2939 bacpy(&bt_sk(sk)->dst, conn->dst);
2940 l2cap_pi(sk)->psm = psm;
2941 l2cap_pi(sk)->dcid = scid;
2943 __l2cap_chan_add(conn, sk, parent);
2944 dcid = l2cap_pi(sk)->scid;
2946 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2948 l2cap_pi(sk)->ident = cmd->ident;
/* Decide the response: only proceed to BT_CONFIG once the remote feature
 * mask is known and security is satisfied; otherwise report "pending". */
2950 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2951 if (l2cap_check_security(sk)) {
2952 if (bt_sk(sk)->defer_setup) {
2953 sk->sk_state = BT_CONNECT2;
2954 result = L2CAP_CR_PEND;
2955 status = L2CAP_CS_AUTHOR_PEND;
2956 parent->sk_data_ready(parent, 0);
2957 } else {
2958 sk->sk_state = BT_CONFIG;
2959 result = L2CAP_CR_SUCCESS;
2960 status = L2CAP_CS_NO_INFO;
2962 } else {
2963 sk->sk_state = BT_CONNECT2;
2964 result = L2CAP_CR_PEND;
2965 status = L2CAP_CS_AUTHEN_PEND;
2967 } else {
2968 sk->sk_state = BT_CONNECT2;
2969 result = L2CAP_CR_PEND;
2970 status = L2CAP_CS_NO_INFO;
2973 write_unlock_bh(&list->lock);
2975 response:
2976 bh_unlock_sock(parent);
2978 sendresp:
2979 rsp.scid = cpu_to_le16(scid);
2980 rsp.dcid = cpu_to_le16(dcid);
2981 rsp.result = cpu_to_le16(result);
2982 rsp.status = cpu_to_le16(status);
2983 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First pending connection on this link: probe the peer's feature mask. */
2985 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2986 struct l2cap_info_req info;
2987 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2989 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2990 conn->info_ident = l2cap_get_ident(conn);
2992 mod_timer(&conn->info_timer, jiffies +
2993 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2995 l2cap_send_cmd(conn, conn->info_ident,
2996 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: start configuration right away. sk is only valid
 * here because result == L2CAP_CR_SUCCESS implies the child was created. */
2999 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3000 result == L2CAP_CR_SUCCESS) {
3001 u8 buf[128];
3002 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3003 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3004 l2cap_build_conf_req(sk, buf), buf);
3005 l2cap_pi(sk)->num_conf_req++;
3008 return 0;
/* Handle an incoming Connection Response to our Connection Request.
 * Locates the channel by the peer-assigned scid, or — while still pending
 * (scid == 0) — by the command identifier. On success, moves the channel to
 * BT_CONFIG and sends our first Configure Request; on "pending" just flags
 * it; any other result tears the channel down. Returns 0, or -EFAULT when
 * no matching channel exists. */
3011 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3013 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3014 u16 scid, dcid, result, status;
3015 struct sock *sk;
3016 u8 req[128];
3018 scid = __le16_to_cpu(rsp->scid);
3019 dcid = __le16_to_cpu(rsp->dcid);
3020 result = __le16_to_cpu(rsp->result);
3021 status = __le16_to_cpu(rsp->status);
3023 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* The lookup helpers return the socket locked; released at the end. */
3025 if (scid) {
3026 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3027 if (!sk)
3028 return -EFAULT;
3029 } else {
3030 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3031 if (!sk)
3032 return -EFAULT;
3035 switch (result) {
3036 case L2CAP_CR_SUCCESS:
3037 sk->sk_state = BT_CONFIG;
3038 l2cap_pi(sk)->ident = 0;
3039 l2cap_pi(sk)->dcid = dcid;
3040 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Do not send a duplicate Configure Request if one is already out. */
3042 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3043 break;
3045 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3047 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3048 l2cap_build_conf_req(sk, req), req);
3049 l2cap_pi(sk)->num_conf_req++;
3050 break;
3052 case L2CAP_CR_PEND:
3053 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3054 break;
3056 default:
3057 l2cap_chan_del(sk, ECONNREFUSED);
3058 break;
3061 bh_unlock_sock(sk);
3062 return 0;
/* Handle an incoming Configure Request. Fragments (C-flag set) are
 * accumulated in pi->conf_req; once complete, the options are parsed, a
 * Configure Response is sent, and — when both directions have finished
 * configuration — the channel is promoted to BT_CONNECTED.
 * Returns 0, or -ENOENT when the dcid matches no channel. */
3065 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3067 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3068 u16 dcid, flags;
3069 u8 rsp[64];
3070 struct sock *sk;
3071 int len;
3073 dcid = __le16_to_cpu(req->dcid);
3074 flags = __le16_to_cpu(req->flags);
3076 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3078 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3079 if (!sk)
3080 return -ENOENT;
/* Configuration is only legal in BT_CONFIG; 0x0002 = "invalid CID". */
3082 if (sk->sk_state != BT_CONFIG) {
3083 struct l2cap_cmd_rej rej;
3085 rej.reason = cpu_to_le16(0x0002);
3086 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3087 sizeof(rej), &rej);
3088 goto unlock;
3091 /* Reject if config buffer is too small. */
3092 len = cmd_len - sizeof(*req);
3093 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3094 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3095 l2cap_build_conf_rsp(sk, rsp,
3096 L2CAP_CONF_REJECT, flags), rsp);
3097 goto unlock;
3100 /* Store config. */
3101 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3102 l2cap_pi(sk)->conf_len += len;
/* C-flag (0x0001) set: more fragments follow, just ack this one. */
3104 if (flags & 0x0001) {
3105 /* Incomplete config. Send empty response. */
3106 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3107 l2cap_build_conf_rsp(sk, rsp,
3108 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3109 goto unlock;
3112 /* Complete config. */
3113 len = l2cap_parse_conf_req(sk, rsp);
3114 if (len < 0) {
3115 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3116 goto unlock;
3119 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3120 l2cap_pi(sk)->num_conf_rsp++;
3122 /* Reset config buffer. */
3123 l2cap_pi(sk)->conf_len = 0;
3125 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3126 goto unlock;
/* Both sides configured: finalise FCS choice and go connected. */
3128 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3129 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3130 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3131 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3133 sk->sk_state = BT_CONNECTED;
3135 l2cap_pi(sk)->next_tx_seq = 0;
3136 l2cap_pi(sk)->expected_tx_seq = 0;
3137 __skb_queue_head_init(TX_QUEUE(sk));
3138 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3139 l2cap_ertm_init(sk);
3141 l2cap_chan_ready(sk);
3142 goto unlock;
/* Peer configured first: now send our own Configure Request. */
3145 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3146 u8 buf[64];
3147 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3148 l2cap_build_conf_req(sk, buf), buf);
3149 l2cap_pi(sk)->num_conf_req++;
3152 unlock:
3153 bh_unlock_sock(sk);
3154 return 0;
3157 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3159 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3160 u16 scid, flags, result;
3161 struct sock *sk;
3162 int len = cmd->len - sizeof(*rsp);
3164 scid = __le16_to_cpu(rsp->scid);
3165 flags = __le16_to_cpu(rsp->flags);
3166 result = __le16_to_cpu(rsp->result);
3168 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3169 scid, flags, result);
3171 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3172 if (!sk)
3173 return 0;
3175 switch (result) {
3176 case L2CAP_CONF_SUCCESS:
3177 l2cap_conf_rfc_get(sk, rsp->data, len);
3178 break;
3180 case L2CAP_CONF_UNACCEPT:
3181 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3182 char req[64];
3184 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3185 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3186 goto done;
3189 /* throw out any old stored conf requests */
3190 result = L2CAP_CONF_SUCCESS;
3191 len = l2cap_parse_conf_rsp(sk, rsp->data,
3192 len, req, &result);
3193 if (len < 0) {
3194 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3195 goto done;
3198 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3199 L2CAP_CONF_REQ, len, req);
3200 l2cap_pi(sk)->num_conf_req++;
3201 if (result != L2CAP_CONF_SUCCESS)
3202 goto done;
3203 break;
3206 default:
3207 sk->sk_err = ECONNRESET;
3208 l2cap_sock_set_timer(sk, HZ * 5);
3209 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3210 goto done;
3213 if (flags & 0x01)
3214 goto done;
3216 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3218 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3219 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3220 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3221 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3223 sk->sk_state = BT_CONNECTED;
3224 l2cap_pi(sk)->next_tx_seq = 0;
3225 l2cap_pi(sk)->expected_tx_seq = 0;
3226 __skb_queue_head_init(TX_QUEUE(sk));
3227 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3228 l2cap_ertm_init(sk);
3230 l2cap_chan_ready(sk);
3233 done:
3234 bh_unlock_sock(sk);
3235 return 0;
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, remove the channel and
 * kill the socket. Always returns 0 (unknown dcid is silently ignored). */
3238 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3240 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3241 struct l2cap_disconn_rsp rsp;
3242 u16 dcid, scid;
3243 struct sock *sk;
3245 scid = __le16_to_cpu(req->scid);
3246 dcid = __le16_to_cpu(req->dcid);
3248 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid; lookup returns the socket locked. */
3250 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3251 if (!sk)
3252 return 0;
3254 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3255 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3256 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3258 sk->sk_shutdown = SHUTDOWN_MASK;
3260 l2cap_chan_del(sk, ECONNRESET);
3261 bh_unlock_sock(sk);
/* Free the socket unless user space still holds a reference. */
3263 l2cap_sock_kill(sk);
3264 return 0;
/* Handle an incoming Disconnection Response to our Disconnection Request:
 * the channel is now closed on both sides, so remove it and kill the
 * socket. Always returns 0 (unknown scid is silently ignored). */
3267 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3269 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3270 u16 dcid, scid;
3271 struct sock *sk;
3273 scid = __le16_to_cpu(rsp->scid);
3274 dcid = __le16_to_cpu(rsp->dcid);
3276 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
/* Lookup returns the socket locked. */
3278 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3279 if (!sk)
3280 return 0;
/* err == 0: this is an orderly, locally-initiated close. */
3282 l2cap_chan_del(sk, 0);
3283 bh_unlock_sock(sk);
3285 l2cap_sock_kill(sk);
3286 return 0;
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channels queries with our capabilities; any other type gets a
 * "not supported" response. Always returns 0. */
3289 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3291 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3292 u16 type;
3294 type = __le16_to_cpu(req->type);
3296 BT_DBG("type 0x%4.4x", type);
3298 if (type == L2CAP_IT_FEAT_MASK) {
/* buf = 4-byte l2cap_info_rsp header + 4-byte feature mask. */
3299 u8 buf[8];
3300 u32 feat_mask = l2cap_feat_mask;
3301 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3302 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3303 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Only advertise ERTM/Streaming/FCS when the module enables them. */
3304 if (enable_ertm)
3305 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3306 | L2CAP_FEAT_FCS;
3307 put_unaligned_le32(feat_mask, rsp->data);
3308 l2cap_send_cmd(conn, cmd->ident,
3309 L2CAP_INFO_RSP, sizeof(buf), buf);
3310 } else if (type == L2CAP_IT_FIXED_CHAN) {
/* buf = 4-byte header + 8-byte fixed-channel bitmap. */
3311 u8 buf[12];
3312 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3313 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3314 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3315 memcpy(buf + 4, l2cap_fixed_chan, 8);
3316 l2cap_send_cmd(conn, cmd->ident,
3317 L2CAP_INFO_RSP, sizeof(buf), buf);
3318 } else {
3319 struct l2cap_info_rsp rsp;
3320 rsp.type = cpu_to_le16(type);
3321 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3322 l2cap_send_cmd(conn, cmd->ident,
3323 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3326 return 0;
/* Handle an incoming Information Response to our probe: record the peer's
 * feature mask, optionally follow up with a fixed-channels query, and once
 * the exchange is complete resume starting pending channels.
 * Always returns 0. */
3329 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3331 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3332 u16 type, result;
3334 type = __le16_to_cpu(rsp->type);
3335 result = __le16_to_cpu(rsp->result);
3337 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* The reply arrived in time; cancel the info-request timeout. */
3339 del_timer(&conn->info_timer);
3341 if (type == L2CAP_IT_FEAT_MASK) {
3342 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, chase with a second query;
 * otherwise the exchange is finished. */
3344 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3345 struct l2cap_info_req req;
3346 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3348 conn->info_ident = l2cap_get_ident(conn);
3350 l2cap_send_cmd(conn, conn->info_ident,
3351 L2CAP_INFO_REQ, sizeof(req), &req);
3352 } else {
3353 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3354 conn->info_ident = 0;
3356 l2cap_conn_start(conn);
3358 } else if (type == L2CAP_IT_FIXED_CHAN) {
3359 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3360 conn->info_ident = 0;
3362 l2cap_conn_start(conn);
3365 return 0;
/* Process an skb received on the L2CAP signalling channel (CID 0x0001):
 * iterate over the concatenated commands it contains, dispatch each to its
 * handler, and send a Command Reject for unknown or failing commands.
 * Consumes (frees) the skb. */
3368 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3370 u8 *data = skb->data;
3371 int len = skb->len;
3372 struct l2cap_cmd_hdr cmd;
3373 int err = 0;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic first. */
3375 l2cap_raw_recv(conn, skb);
3377 while (len >= L2CAP_CMD_HDR_SIZE) {
3378 u16 cmd_len;
3379 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3380 data += L2CAP_CMD_HDR_SIZE;
3381 len -= L2CAP_CMD_HDR_SIZE;
3383 cmd_len = le16_to_cpu(cmd.len);
3385 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or with the reserved
 * identifier 0, is malformed; stop parsing this skb. */
3387 if (cmd_len > len || !cmd.ident) {
3388 BT_DBG("corrupted command");
3389 break;
3392 switch (cmd.code) {
3393 case L2CAP_COMMAND_REJ:
3394 l2cap_command_rej(conn, &cmd, data);
3395 break;
3397 case L2CAP_CONN_REQ:
3398 err = l2cap_connect_req(conn, &cmd, data);
3399 break;
3401 case L2CAP_CONN_RSP:
3402 err = l2cap_connect_rsp(conn, &cmd, data);
3403 break;
3405 case L2CAP_CONF_REQ:
3406 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3407 break;
3409 case L2CAP_CONF_RSP:
3410 err = l2cap_config_rsp(conn, &cmd, data);
3411 break;
3413 case L2CAP_DISCONN_REQ:
3414 err = l2cap_disconnect_req(conn, &cmd, data);
3415 break;
3417 case L2CAP_DISCONN_RSP:
3418 err = l2cap_disconnect_rsp(conn, &cmd, data);
3419 break;
/* Echo requests are answered by mirroring the payload back. */
3421 case L2CAP_ECHO_REQ:
3422 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3423 break;
3425 case L2CAP_ECHO_RSP:
3426 break;
3428 case L2CAP_INFO_REQ:
3429 err = l2cap_information_req(conn, &cmd, data);
3430 break;
3432 case L2CAP_INFO_RSP:
3433 err = l2cap_information_rsp(conn, &cmd, data);
3434 break;
3436 default:
3437 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3438 err = -EINVAL;
3439 break;
3442 if (err) {
3443 struct l2cap_cmd_rej rej;
3444 BT_DBG("error %d", err);
3446 /* FIXME: Map err to a valid reason */
3447 rej.reason = cpu_to_le16(0);
3448 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Advance to the next command in the same skb. */
3451 data += cmd_len;
3452 len -= cmd_len;
3455 kfree_skb(skb);
/* Verify and strip the 2-byte CRC16 FCS trailer of an ERTM/Streaming frame.
 * Returns 0 on success (or when FCS is disabled), -EBADMSG on mismatch.
 * Note the deliberate ordering: skb_trim() shortens skb->len by 2 first,
 * after which skb->data + skb->len points exactly at the (still present)
 * FCS bytes in the buffer. The CRC covers the L2CAP header (which lives
 * hdr_size bytes *before* skb->data by this point) plus the payload. */
3458 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3460 u16 our_fcs, rcv_fcs;
/* Basic L2CAP header plus the 2-byte ERTM control field. */
3461 int hdr_size = L2CAP_HDR_SIZE + 2;
3463 if (pi->fcs == L2CAP_FCS_CRC16) {
3464 skb_trim(skb, skb->len - 2);
3465 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3466 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3468 if (our_fcs != rcv_fcs)
3469 return -EBADMSG;
3471 return 0;
/* After receiving a poll (P-bit), answer with whatever the ERTM state
 * requires: an RNR when we are locally busy, retransmissions and/or pending
 * I-frames otherwise, and a plain RR if nothing else went out. */
3474 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3476 struct l2cap_pinfo *pi = l2cap_pi(sk);
3477 u16 control = 0;
3479 pi->frames_sent = 0;
/* Acknowledge everything received so far in the ReqSeq field. */
3481 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3483 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3484 control |= L2CAP_SUPER_RCV_NOT_READY;
3485 l2cap_send_sframe(pi, control);
3486 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3489 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3490 l2cap_retransmit_frames(sk);
3492 l2cap_ertm_send(sk);
/* If nothing was transmitted and we are not busy, still acknowledge
 * the poll with an RR so the peer's monitor logic can proceed. */
3494 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3495 pi->frames_sent == 0) {
3496 control |= L2CAP_SUPER_RCV_READY;
3497 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, kept ordered by
 * each frame's distance (mod 64) from buffer_seq so wrap-around sequence
 * numbers sort correctly. Returns 0 on insertion, -EINVAL for a duplicate
 * tx_seq already queued. */
3501 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3503 struct sk_buff *next_skb;
3504 struct l2cap_pinfo *pi = l2cap_pi(sk);
3505 int tx_seq_offset, next_tx_seq_offset;
/* Stash the frame's sequence number and SAR bits in the skb CB. */
3507 bt_cb(skb)->tx_seq = tx_seq;
3508 bt_cb(skb)->sar = sar;
3510 next_skb = skb_peek(SREJ_QUEUE(sk));
3511 if (!next_skb) {
3512 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3513 return 0;
/* Normalise to a non-negative distance from buffer_seq (mod-64 space). */
3516 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3517 if (tx_seq_offset < 0)
3518 tx_seq_offset += 64;
3520 do {
3521 if (bt_cb(next_skb)->tx_seq == tx_seq)
3522 return -EINVAL;
3524 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3525 pi->buffer_seq) % 64;
3526 if (next_tx_seq_offset < 0)
3527 next_tx_seq_offset += 64;
/* Found the first queued frame that is "later": insert before it. */
3529 if (next_tx_seq_offset > tx_seq_offset) {
3530 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3531 return 0;
3534 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3535 break;
3537 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Later than everything queued: append at the tail. */
3539 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3541 return 0;
/* ERTM SAR reassembly: feed one in-sequence I-frame into the partial-SDU
 * state machine (pi->sdu / pi->sdu_len / pi->partial_sdu_len, guarded by
 * the L2CAP_CONN_SAR_SDU flag) and deliver completed SDUs to the socket.
 * Returns 0 normally, or a negative error (-ENOMEM / queue error) when the
 * final segment cannot be delivered — in that case SAR_RETRY is set so the
 * caller can re-attempt delivery with the same frame.
 * Note: both the "drop" and "disconnect" error labels at the bottom send a
 * Disconnect Request — any SAR protocol violation tears the channel down. */
3544 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3546 struct l2cap_pinfo *pi = l2cap_pi(sk);
3547 struct sk_buff *_skb;
3548 int err;
3550 switch (control & L2CAP_CTRL_SAR) {
3551 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented SDU must not arrive mid-reassembly. */
3552 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3553 goto drop;
3555 err = sock_queue_rcv_skb(sk, skb);
3556 if (!err)
3557 return err;
3559 break;
3561 case L2CAP_SDU_START:
3562 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3563 goto drop;
/* First two payload bytes carry the total SDU length. */
3565 pi->sdu_len = get_unaligned_le16(skb->data);
3567 if (pi->sdu_len > pi->imtu)
3568 goto disconnect;
3570 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3571 if (!pi->sdu)
3572 return -ENOMEM;
3574 /* pull sdu_len bytes only after alloc, because of Local Busy
3575 * condition we have to be sure that this will be executed
3576 * only once, i.e., when alloc does not fail */
3577 skb_pull(skb, 2);
3579 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3581 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3582 pi->partial_sdu_len = skb->len;
3583 break;
3585 case L2CAP_SDU_CONTINUE:
3586 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3587 goto disconnect;
3589 if (!pi->sdu)
3590 goto disconnect;
3592 pi->partial_sdu_len += skb->len;
3593 if (pi->partial_sdu_len > pi->sdu_len)
3594 goto drop;
3596 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3598 break;
3600 case L2CAP_SDU_END:
3601 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3602 goto disconnect;
3604 if (!pi->sdu)
3605 goto disconnect;
/* On a delivery retry the segment was already accounted and copied;
 * skip straight to cloning and queueing. */
3607 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3608 pi->partial_sdu_len += skb->len;
3610 if (pi->partial_sdu_len > pi->imtu)
3611 goto drop;
3613 if (pi->partial_sdu_len != pi->sdu_len)
3614 goto drop;
3616 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3619 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3620 if (!_skb) {
3621 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3622 return -ENOMEM;
3625 err = sock_queue_rcv_skb(sk, _skb);
3626 if (err < 0) {
3627 kfree_skb(_skb);
3628 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3629 return err;
/* SDU delivered: clear reassembly state and free our copy. */
3632 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3633 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3635 kfree_skb(pi->sdu);
3636 break;
3639 kfree_skb(skb);
3640 return 0;
3642 drop:
3643 kfree_skb(pi->sdu);
3644 pi->sdu = NULL;
/* Falls through: a dropped partial SDU also disconnects the channel. */
3646 disconnect:
3647 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3648 kfree_skb(skb);
3649 return 0;
/* Try to drain the local-busy backlog (BUSY_QUEUE) through the reassembly
 * path. Returns -EBUSY if the receiver is still blocked (frame re-queued at
 * the head), or 0 once the queue is empty — in which case, if we had sent
 * an RNR, an RR poll is sent and the local-busy state is cleared. */
3652 static int l2cap_try_push_rx_skb(struct sock *sk)
3654 struct l2cap_pinfo *pi = l2cap_pi(sk);
3655 struct sk_buff *skb;
3656 u16 control;
3657 int err;
3659 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3660 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3661 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3662 if (err < 0) {
/* Still busy: put the frame back for the next attempt. */
3663 skb_queue_head(BUSY_QUEUE(sk), skb);
3664 return -EBUSY;
3667 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3670 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3671 goto done;
/* We told the peer "receiver not ready" earlier; now poll it with an
 * RR + P-bit and wait for the F-bit response (WAIT_F). */
3673 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3674 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3675 l2cap_send_sframe(pi, control);
3676 l2cap_pi(sk)->retry_count = 1;
3678 del_timer(&pi->retrans_timer);
3679 __mod_monitor_timer();
3681 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3683 done:
3684 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3685 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3687 BT_DBG("sk %p, Exit local busy", sk);
3689 return 0;
/* Workqueue handler for the local-busy condition: repeatedly try to push
 * the backlog into the socket, sleeping between attempts, until it drains,
 * the retry budget is exhausted (then disconnect), a signal arrives, or the
 * socket reports an error. Runs in process context under lock_sock(). */
3692 static void l2cap_busy_work(struct work_struct *work)
3694 DECLARE_WAITQUEUE(wait, current);
3695 struct l2cap_pinfo *pi =
3696 container_of(work, struct l2cap_pinfo, busy_work);
3697 struct sock *sk = (struct sock *)pi;
3698 int n_tries = 0, timeo = HZ/5, err;
3699 struct sk_buff *skb;
3701 lock_sock(sk);
3703 add_wait_queue(sk_sleep(sk), &wait);
3704 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3705 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after too many attempts and tear the channel down. */
3707 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3708 err = -EBUSY;
3709 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3710 break;
3713 if (!timeo)
3714 timeo = HZ/5;
3716 if (signal_pending(current)) {
3717 err = sock_intr_errno(timeo);
3718 break;
/* Sleep without the socket lock so the receive path can make room. */
3721 release_sock(sk);
3722 timeo = schedule_timeout(timeo);
3723 lock_sock(sk);
3725 err = sock_error(sk);
3726 if (err)
3727 break;
3729 if (l2cap_try_push_rx_skb(sk) == 0)
3730 break;
3733 set_current_state(TASK_RUNNING);
3734 remove_wait_queue(sk_sleep(sk), &wait);
3736 release_sock(sk);
/* Deliver one in-sequence ERTM I-frame. If already in local-busy, append
 * it to the backlog and retry the drain. Otherwise attempt reassembly
 * directly; when the socket cannot take more data, enter the local-busy
 * state: queue the frame, send RNR, and schedule the busy worker.
 * Returns the reassembly/drain result (negative while busy). */
3739 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3741 struct l2cap_pinfo *pi = l2cap_pi(sk);
3742 int sctrl, err;
3744 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3745 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3746 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3747 return l2cap_try_push_rx_skb(sk);
3752 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3753 if (err >= 0) {
3754 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3755 return err;
3758 /* Busy Condition */
3759 BT_DBG("sk %p, Enter local busy", sk);
3761 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3762 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3763 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending: RNR acknowledging buffer_seq. */
3765 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3766 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3767 l2cap_send_sframe(pi, sctrl);
3769 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3771 del_timer(&pi->ack_timer);
/* The busy worker will keep retrying delivery in process context. */
3773 queue_work(_busy_wq, &pi->busy_work);
3775 return err;
3778 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3780 struct l2cap_pinfo *pi = l2cap_pi(sk);
3781 struct sk_buff *_skb;
3782 int err = -EINVAL;
3785 * TODO: We have to notify the userland if some data is lost with the
3786 * Streaming Mode.
3789 switch (control & L2CAP_CTRL_SAR) {
3790 case L2CAP_SDU_UNSEGMENTED:
3791 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3792 kfree_skb(pi->sdu);
3793 break;
3796 err = sock_queue_rcv_skb(sk, skb);
3797 if (!err)
3798 return 0;
3800 break;
3802 case L2CAP_SDU_START:
3803 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3804 kfree_skb(pi->sdu);
3805 break;
3808 pi->sdu_len = get_unaligned_le16(skb->data);
3809 skb_pull(skb, 2);
3811 if (pi->sdu_len > pi->imtu) {
3812 err = -EMSGSIZE;
3813 break;
3816 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3817 if (!pi->sdu) {
3818 err = -ENOMEM;
3819 break;
3822 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3824 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3825 pi->partial_sdu_len = skb->len;
3826 err = 0;
3827 break;
3829 case L2CAP_SDU_CONTINUE:
3830 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3831 break;
3833 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3835 pi->partial_sdu_len += skb->len;
3836 if (pi->partial_sdu_len > pi->sdu_len)
3837 kfree_skb(pi->sdu);
3838 else
3839 err = 0;
3841 break;
3843 case L2CAP_SDU_END:
3844 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3845 break;
3847 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3849 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3850 pi->partial_sdu_len += skb->len;
3852 if (pi->partial_sdu_len > pi->imtu)
3853 goto drop;
3855 if (pi->partial_sdu_len == pi->sdu_len) {
3856 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3857 err = sock_queue_rcv_skb(sk, _skb);
3858 if (err < 0)
3859 kfree_skb(_skb);
3861 err = 0;
3863 drop:
3864 kfree_skb(pi->sdu);
3865 break;
3868 kfree_skb(skb);
3869 return err;
3872 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3874 struct sk_buff *skb;
3875 u16 control;
3877 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3878 if (bt_cb(skb)->tx_seq != tx_seq)
3879 break;
3881 skb = skb_dequeue(SREJ_QUEUE(sk));
3882 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3883 l2cap_ertm_reassembly_sdu(sk, skb, control);
3884 l2cap_pi(sk)->buffer_seq_srej =
3885 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3886 tx_seq = (tx_seq + 1) % 64;
/* The frame we selectively rejected as @tx_seq has arrived: remove its
 * entry from the SREJ list. Every entry encountered before it gets its
 * SREJ S-frame re-sent and is rotated to the list tail, preserving the
 * waiting order for still-missing frames. */
3890 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3892 struct l2cap_pinfo *pi = l2cap_pi(sk);
3893 struct srej_list *l, *tmp;
3894 u16 control;
3896 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3897 if (l->tx_seq == tx_seq) {
/* Found the satisfied request: drop it and stop. */
3898 list_del(&l->list);
3899 kfree(l);
3900 return;
/* Still missing: re-request it and move the entry to the tail. */
3902 control = L2CAP_SUPER_SELECT_REJECT;
3903 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3904 l2cap_send_sframe(pi, control);
3905 list_del(&l->list);
3906 list_add_tail(&l->list, SREJ_LIST(sk));
/* A gap was detected: send one SREJ S-frame for every sequence number from
 * expected_tx_seq up to (but excluding) @tx_seq, recording each request in
 * the SREJ list, then advance expected_tx_seq past @tx_seq itself. */
3910 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3912 struct l2cap_pinfo *pi = l2cap_pi(sk);
3913 struct srej_list *new;
3914 u16 control;
3916 while (tx_seq != pi->expected_tx_seq) {
3917 control = L2CAP_SUPER_SELECT_REJECT;
3918 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3919 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can return NULL and is used
 * unchecked here — NULL dereference under memory pressure. The
 * upstream fix makes this function return an error and lets the
 * callers disconnect; flagged rather than changed because that
 * requires a signature change. */
3921 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3922 new->tx_seq = pi->expected_tx_seq;
3923 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3924 list_add_tail(&new->list, SREJ_LIST(sk));
/* Account for the frame that just arrived (tx_seq itself). */
3926 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Process a received ERTM I-frame.
 *
 * rx_control carries tx_seq (this frame's own sequence number), req_seq
 * (acknowledging frames we transmitted) and the SAR bits.  In-sequence
 * frames take the "expected" fast path and are pushed up the stack;
 * frames outside the receive window tear the channel down; other
 * out-of-sequence frames enter or continue SREJ recovery.  An ack is
 * sent after every num_to_ack received frames.  The skb is always
 * consumed (delivered, queued on the SREJ queue, or dropped); the
 * function always returns 0.  Runs with the socket lock held.
 */
3929 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3931 struct l2cap_pinfo *pi = l2cap_pi(sk);
3932 u8 tx_seq = __get_txseq(rx_control);
3933 u8 req_seq = __get_reqseq(rx_control);
3934 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3935 int tx_seq_offset, expected_tx_seq_offset;
/* ack cadence: one ack per (tx_win/6)+1 frames received */
3936 int num_to_ack = (pi->tx_win/6) + 1;
3937 int err = 0;
3939 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3940 rx_control);
/* an F-bit while waiting for one (WAIT_F) answers our poll:
 * stop the monitor timer and fall back to the retransmission timer
 * if frames are still unacked */
3942 if (L2CAP_CTRL_FINAL & rx_control &&
3943 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3944 del_timer(&pi->monitor_timer);
3945 if (pi->unacked_frames > 0)
3946 __mod_retrans_timer();
3947 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* req_seq acknowledges our transmitted frames */
3950 pi->expected_ack_seq = req_seq;
3951 l2cap_drop_acked_frames(sk);
3953 if (tx_seq == pi->expected_tx_seq)
3954 goto expected;
/* distance of tx_seq from buffer_seq, modulo-64 sequence space */
3956 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3957 if (tx_seq_offset < 0)
3958 tx_seq_offset += 64;
3960 /* invalid tx_seq */
3961 if (tx_seq_offset >= pi->tx_win) {
3962 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3963 goto drop;
3966 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3967 goto drop;
/* already in SREJ recovery: match against the list of missing frames */
3969 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3970 struct srej_list *first;
3972 first = list_first_entry(SREJ_LIST(sk),
3973 struct srej_list, list);
3974 if (tx_seq == first->tx_seq) {
3975 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3976 l2cap_check_srej_gap(sk, tx_seq);
3978 list_del(&first->list);
3979 kfree(first);
/* all requested frames arrived: leave SREJ_SENT state */
3981 if (list_empty(SREJ_LIST(sk))) {
3982 pi->buffer_seq = pi->buffer_seq_srej;
3983 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3984 l2cap_send_ack(pi);
3985 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3987 } else {
3988 struct srej_list *l;
3990 /* duplicated tx_seq */
3991 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3992 goto drop;
3994 list_for_each_entry(l, SREJ_LIST(sk), list) {
3995 if (l->tx_seq == tx_seq) {
3996 l2cap_resend_srejframe(sk, tx_seq);
3997 return 0;
4000 l2cap_send_srejframe(sk, tx_seq);
/* first out-of-sequence frame: enter SREJ recovery */
4002 } else {
4003 expected_tx_seq_offset =
4004 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4005 if (expected_tx_seq_offset < 0)
4006 expected_tx_seq_offset += 64;
4008 /* duplicated tx_seq */
4009 if (tx_seq_offset < expected_tx_seq_offset)
4010 goto drop;
4012 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4014 BT_DBG("sk %p, Enter SREJ", sk);
4016 INIT_LIST_HEAD(SREJ_LIST(sk));
4017 pi->buffer_seq_srej = pi->buffer_seq;
4019 __skb_queue_head_init(SREJ_QUEUE(sk));
4020 __skb_queue_head_init(BUSY_QUEUE(sk));
4021 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4023 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4025 l2cap_send_srejframe(sk, tx_seq);
4027 del_timer(&pi->ack_timer);
4029 return 0;
/* in-sequence frame: deliver (or park on SREJ queue while recovering) */
4031 expected:
4032 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4034 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4035 bt_cb(skb)->tx_seq = tx_seq;
4036 bt_cb(skb)->sar = sar;
4037 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4038 return 0;
4041 err = l2cap_push_rx_skb(sk, skb, rx_control);
4042 if (err < 0)
4043 return 0;
4045 if (rx_control & L2CAP_CTRL_FINAL) {
4046 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4047 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4048 else
4049 l2cap_retransmit_frames(sk);
4052 __mod_ack_timer();
4054 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4055 if (pi->num_acked == num_to_ack - 1)
4056 l2cap_send_ack(pi);
4058 return 0;
4060 drop:
4061 kfree_skb(skb);
4062 return 0;
/* Handle a Receiver-Ready (RR) S-frame: ack our transmitted frames up
 * to req_seq, then react to the P/F bits — a poll demands an immediate
 * F-bit response (SREJ tail or RR/RNR), a final bit resolves a pending
 * REJ exchange, and a plain RR simply clears remote-busy and lets
 * transmission continue.  Runs with the socket lock held.
 */
4065 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4067 struct l2cap_pinfo *pi = l2cap_pi(sk);
4069 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4070 rx_control);
4072 pi->expected_ack_seq = __get_reqseq(rx_control);
4073 l2cap_drop_acked_frames(sk);
/* peer polled us: answer with the F-bit set */
4075 if (rx_control & L2CAP_CTRL_POLL) {
4076 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4077 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4078 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4079 (pi->unacked_frames > 0))
4080 __mod_retrans_timer();
4082 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4083 l2cap_send_srejtail(sk);
4084 } else {
4085 l2cap_send_i_or_rr_or_rnr(sk);
/* F-bit answers our earlier poll; retransmit unless a REJ
 * already triggered the retransmission */
4088 } else if (rx_control & L2CAP_CTRL_FINAL) {
4089 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4091 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4092 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4093 else
4094 l2cap_retransmit_frames(sk);
4096 } else {
4097 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4098 (pi->unacked_frames > 0))
4099 __mod_retrans_timer();
4101 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4102 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4103 l2cap_send_ack(pi);
4104 } else {
4105 l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission
 * starting at req_seq.  Frames before req_seq are acked and dropped;
 * the rest are retransmitted — except when the F-bit resolves a REJ
 * exchange that was already acted upon.  While waiting for an F-bit
 * (WAIT_F) mark the REJ as acted on so the later F-bit does not
 * retransmit twice.  Runs with the socket lock held.
 */
4110 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4112 struct l2cap_pinfo *pi = l2cap_pi(sk);
4113 u8 tx_seq = __get_reqseq(rx_control);
4115 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4117 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4119 pi->expected_ack_seq = tx_seq;
4120 l2cap_drop_acked_frames(sk);
4122 if (rx_control & L2CAP_CTRL_FINAL) {
4123 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4124 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4125 else
4126 l2cap_retransmit_frames(sk);
4127 } else {
4128 l2cap_retransmit_frames(sk);
4130 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4131 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit only the single
 * frame req_seq.  A poll additionally acks earlier frames and resumes
 * sending with the F-bit set; a final bit suppresses the retransmission
 * when this SREJ was already acted upon (SREJ_ACT with matching saved
 * req_seq).  Runs with the socket lock held.
 */
4134 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4136 struct l2cap_pinfo *pi = l2cap_pi(sk);
4137 u8 tx_seq = __get_reqseq(rx_control);
4139 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4141 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4143 if (rx_control & L2CAP_CTRL_POLL) {
4144 pi->expected_ack_seq = tx_seq;
4145 l2cap_drop_acked_frames(sk);
4147 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4148 l2cap_retransmit_one_frame(sk, tx_seq);
4150 l2cap_ertm_send(sk);
/* remember which frame was re-sent so a later F-bit can be matched */
4152 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4153 pi->srej_save_reqseq = tx_seq;
4154 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4156 } else if (rx_control & L2CAP_CTRL_FINAL) {
4157 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4158 pi->srej_save_reqseq == tx_seq)
4159 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4160 else
4161 l2cap_retransmit_one_frame(sk, tx_seq);
4162 } else {
4163 l2cap_retransmit_one_frame(sk, tx_seq);
4164 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4165 pi->srej_save_reqseq = tx_seq;
4166 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver-Not-Ready (RNR) S-frame: the peer is busy.  Mark
 * remote-busy, ack frames up to req_seq and stop retransmitting.  If
 * the peer polled us, answer with an F-bit response (RR/RNR, or the
 * SREJ tail when SREJ recovery is in progress).  Runs with the socket
 * lock held.
 */
4171 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4173 struct l2cap_pinfo *pi = l2cap_pi(sk);
4174 u8 tx_seq = __get_reqseq(rx_control);
4176 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4178 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4179 pi->expected_ack_seq = tx_seq;
4180 l2cap_drop_acked_frames(sk);
4182 if (rx_control & L2CAP_CTRL_POLL)
4183 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* not in SREJ recovery: stop retransmissions and answer the poll */
4185 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4186 del_timer(&pi->retrans_timer);
4187 if (rx_control & L2CAP_CTRL_POLL)
4188 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4189 return;
4192 if (rx_control & L2CAP_CTRL_POLL)
4193 l2cap_send_srejtail(sk);
4194 else
4195 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM S-frame (supervisory frame) to the handler
 * for its type (RR / REJ / SREJ / RNR).  An F-bit while in WAIT_F
 * state first cancels the monitor timer.  The skb carries no payload
 * beyond the control field and is freed here; always returns 0.
 * Runs with the socket lock held.
 */
4198 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4200 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4202 if (L2CAP_CTRL_FINAL & rx_control &&
4203 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4204 del_timer(&l2cap_pi(sk)->monitor_timer);
4205 if (l2cap_pi(sk)->unacked_frames > 0)
4206 __mod_retrans_timer();
4207 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4210 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4211 case L2CAP_SUPER_RCV_READY:
4212 l2cap_data_channel_rrframe(sk, rx_control);
4213 break;
4215 case L2CAP_SUPER_REJECT:
4216 l2cap_data_channel_rejframe(sk, rx_control);
4217 break;
4219 case L2CAP_SUPER_SELECT_REJECT:
4220 l2cap_data_channel_srejframe(sk, rx_control);
4221 break;
4223 case L2CAP_SUPER_RCV_NOT_READY:
4224 l2cap_data_channel_rnrframe(sk, rx_control);
4225 break;
/* S-frames carry no data: the skb is consumed here */
4228 kfree_skb(skb);
4229 return 0;
/* ERTM receive entry point: pull the 16-bit control field, verify FCS
 * and payload length against the negotiated MPS, validate req_seq
 * against the window of frames we actually sent, then dispatch to the
 * I-frame or S-frame handler.  Invalid frames either get silently
 * dropped (bad FCS) or tear the channel down (bad length/req_seq).
 * Always returns 0; the skb is consumed on every path.
 */
4232 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4234 struct l2cap_pinfo *pi = l2cap_pi(sk);
4235 u16 control;
4236 u8 req_seq;
4237 int len, next_tx_seq_offset, req_seq_offset;
4239 control = get_unaligned_le16(skb->data);
4240 skb_pull(skb, 2);
4241 len = skb->len;
/* Check the FCS first.
4244 * We can just drop the corrupted I-frame here.
4245 * Receiver will miss it and start proper recovery
4246 * procedures and ask retransmission.
 */
4248 if (l2cap_check_fcs(pi, skb))
4249 goto drop;
/* SAR-start I-frames carry a 2-byte SDU length header */
4251 if (__is_sar_start(control) && __is_iframe(control))
4252 len -= 2;
4254 if (pi->fcs == L2CAP_FCS_CRC16)
4255 len -= 2;
4257 if (len > pi->mps) {
4258 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4259 goto drop;
/* req_seq must fall within [expected_ack_seq, next_tx_seq] (mod 64):
 * the peer cannot ack frames we never sent */
4262 req_seq = __get_reqseq(control);
4263 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4264 if (req_seq_offset < 0)
4265 req_seq_offset += 64;
4267 next_tx_seq_offset =
4268 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4269 if (next_tx_seq_offset < 0)
4270 next_tx_seq_offset += 64;
4272 /* check for invalid req-seq */
4273 if (req_seq_offset > next_tx_seq_offset) {
4274 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4275 goto drop;
4278 if (__is_iframe(control)) {
4279 if (len < 0) {
4280 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4281 goto drop;
4284 l2cap_data_channel_iframe(sk, control, skb);
/* S-frames must have an empty payload */
4285 } else {
4286 if (len != 0) {
4287 BT_ERR("%d", len);
4288 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4289 goto drop;
4292 l2cap_data_channel_sframe(sk, control, skb);
4295 return 0;
4297 drop:
4298 kfree_skb(skb);
4299 return 0;
/* Route an skb arriving on a data CID to the owning channel and handle
 * it according to the channel's mode (basic / ERTM / streaming).
 * l2cap_get_chan_by_scid() returns the socket locked; the lock is
 * released at done/drop below.  Always returns 0; the skb is consumed
 * on every path.
 */
4302 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4304 struct sock *sk;
4305 struct l2cap_pinfo *pi;
4306 u16 control;
4307 u8 tx_seq;
4308 int len;
4310 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4311 if (!sk) {
4312 BT_DBG("unknown cid 0x%4.4x", cid);
4313 goto drop;
4316 pi = l2cap_pi(sk);
4318 BT_DBG("sk %p, len %d", sk, skb->len);
4320 if (sk->sk_state != BT_CONNECTED)
4321 goto drop;
4323 switch (pi->mode) {
4324 case L2CAP_MODE_BASIC:
4325 /* If socket recv buffers overflows we drop data here
4326 * which is *bad* because L2CAP has to be reliable.
4327 * But we don't have any other choice. L2CAP doesn't
4328 * provide flow control mechanism. */
4330 if (pi->imtu < skb->len)
4331 goto drop;
4333 if (!sock_queue_rcv_skb(sk, skb))
4334 goto done;
4335 break;
/* ERTM: process now, or defer to the socket backlog if the
 * socket is owned by a user-context caller */
4337 case L2CAP_MODE_ERTM:
4338 if (!sock_owned_by_user(sk)) {
4339 l2cap_ertm_data_rcv(sk, skb);
4340 } else {
4341 if (sk_add_backlog(sk, skb))
4342 goto drop;
4345 goto done;
/* streaming: no retransmission — out-of-sequence frames simply
 * resynchronize expected_tx_seq */
4347 case L2CAP_MODE_STREAMING:
4348 control = get_unaligned_le16(skb->data);
4349 skb_pull(skb, 2);
4350 len = skb->len;
4352 if (l2cap_check_fcs(pi, skb))
4353 goto drop;
4355 if (__is_sar_start(control))
4356 len -= 2;
4358 if (pi->fcs == L2CAP_FCS_CRC16)
4359 len -= 2;
4361 if (len > pi->mps || len < 0 || __is_sframe(control))
4362 goto drop;
4364 tx_seq = __get_txseq(control);
4366 if (pi->expected_tx_seq == tx_seq)
4367 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4368 else
4369 pi->expected_tx_seq = (tx_seq + 1) % 64;
4371 l2cap_streaming_reassembly_sdu(sk, skb, control);
4373 goto done;
4375 default:
4376 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4377 break;
4380 drop:
4381 kfree_skb(skb);
4383 done:
4384 if (sk)
4385 bh_unlock_sock(sk);
4387 return 0;
4390 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4392 struct sock *sk;
4394 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4395 if (!sk)
4396 goto drop;
4398 BT_DBG("sk %p, len %d", sk, skb->len);
4400 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4401 goto drop;
4403 if (l2cap_pi(sk)->imtu < skb->len)
4404 goto drop;
4406 if (!sock_queue_rcv_skb(sk, skb))
4407 goto done;
4409 drop:
4410 kfree_skb(skb);
4412 done:
4413 if (sk)
4414 bh_unlock_sock(sk);
4415 return 0;
/* Demultiplex a complete, reassembled L2CAP frame by channel ID:
 * signalling channel, connectionless channel (which carries a leading
 * PSM), or a dynamic data channel.  Frames whose header length does
 * not match the skb length are discarded.  The skb is consumed by the
 * chosen handler (or freed here on a length mismatch).
 */
4418 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4420 struct l2cap_hdr *lh = (void *) skb->data;
4421 u16 cid, len;
4422 __le16 psm;
4424 skb_pull(skb, L2CAP_HDR_SIZE);
4425 cid = __le16_to_cpu(lh->cid);
4426 len = __le16_to_cpu(lh->len);
4428 if (len != skb->len) {
4429 kfree_skb(skb);
4430 return;
4433 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4435 switch (cid) {
4436 case L2CAP_CID_SIGNALING:
4437 l2cap_sig_channel(conn, skb);
4438 break;
4440 case L2CAP_CID_CONN_LESS:
4441 psm = get_unaligned_le16(skb->data);
4442 skb_pull(skb, 2);
4443 l2cap_conless_channel(conn, psm, skb);
4444 break;
4446 default:
4447 l2cap_data_channel(conn, cid, skb);
4448 break;
4452 /* ---- L2CAP interface with lower layer (HCI) ---- */
4454 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4456 int exact = 0, lm1 = 0, lm2 = 0;
4457 register struct sock *sk;
4458 struct hlist_node *node;
4460 if (type != ACL_LINK)
4461 return -EINVAL;
4463 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4465 /* Find listening sockets and check their link_mode */
4466 read_lock(&l2cap_sk_list.lock);
4467 sk_for_each(sk, node, &l2cap_sk_list.head) {
4468 if (sk->sk_state != BT_LISTEN)
4469 continue;
4471 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4472 lm1 |= HCI_LM_ACCEPT;
4473 if (l2cap_pi(sk)->role_switch)
4474 lm1 |= HCI_LM_MASTER;
4475 exact++;
4476 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4477 lm2 |= HCI_LM_ACCEPT;
4478 if (l2cap_pi(sk)->role_switch)
4479 lm2 |= HCI_LM_MASTER;
4482 read_unlock(&l2cap_sk_list.lock);
4484 return exact ? lm1 : lm2;
/* HCI callback: an outgoing ACL connection attempt completed.  On
 * success, create the L2CAP connection state and kick off any channels
 * waiting on it; on failure, tear down pending state with the mapped
 * error.  Only ACL links are handled.
 */
4487 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4489 struct l2cap_conn *conn;
4491 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4493 if (hcon->type != ACL_LINK)
4494 return -EINVAL;
4496 if (!status) {
4497 conn = l2cap_conn_add(hcon, status);
4498 if (conn)
4499 l2cap_conn_ready(conn);
4500 } else
4501 l2cap_conn_del(hcon, bt_err(status));
4503 return 0;
/* HCI callback: return the disconnect reason to report for this link.
 * 0x13 is the HCI "remote user terminated connection" code, used as
 * the default when no L2CAP connection state exists.
 */
4506 static int l2cap_disconn_ind(struct hci_conn *hcon)
4508 struct l2cap_conn *conn = hcon->l2cap_data;
4510 BT_DBG("hcon %p", hcon);
4512 if (hcon->type != ACL_LINK || !conn)
4513 return 0x13;
4515 return conn->disc_reason;
4518 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4520 BT_DBG("hcon %p reason %d", hcon, reason);
4522 if (hcon->type != ACL_LINK)
4523 return -EINVAL;
4525 l2cap_conn_del(hcon, bt_err(reason));
4527 return 0;
/* React to an encryption change on a connected channel.  Only
 * SEQPACKET/STREAM sockets care.  When encryption drops: MEDIUM
 * security channels get a 5 second grace timer, HIGH security
 * channels are closed immediately.  When encryption comes up, any
 * pending grace timer on a MEDIUM channel is cancelled.
 */
4530 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4532 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4533 return;
4535 if (encrypt == 0x00) {
4536 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4537 l2cap_sock_clear_timer(sk);
4538 l2cap_sock_set_timer(sk, HZ * 5);
4539 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4540 __l2cap_sock_close(sk, ECONNREFUSED);
4541 } else {
4542 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4543 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished with
 * the given status.  Walk every channel on the connection and advance
 * its state machine: established channels re-check encryption,
 * BT_CONNECT channels now send their deferred Connection Request, and
 * BT_CONNECT2 channels answer the peer's pending Connection Request
 * with success or a security block.  Channels still waiting on a
 * connect (CONNECT_PEND) are skipped.
 */
4548 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4549 struct l2cap_chan_list *l;
4550 struct l2cap_conn *conn = hcon->l2cap_data;
4551 struct sock *sk;
4553 if (!conn)
4554 return 0;
4556 l = &conn->chan_list;
4558 BT_DBG("conn %p", conn);
4560 read_lock(&l->lock);
4562 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4563 bh_lock_sock(sk);
4565 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4566 bh_unlock_sock(sk);
4567 continue;
/* already established: just react to the encryption change */
4570 if (!status && (sk->sk_state == BT_CONNECTED ||
4571 sk->sk_state == BT_CONFIG)) {
4572 l2cap_check_encryption(sk, encrypt);
4573 bh_unlock_sock(sk);
4574 continue;
/* security completed for an outgoing channel: send the
 * Connection Request now, or schedule a quick teardown */
4577 if (sk->sk_state == BT_CONNECT) {
4578 if (!status) {
4579 struct l2cap_conn_req req;
4580 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4581 req.psm = l2cap_pi(sk)->psm;
4583 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4584 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4586 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4587 L2CAP_CONN_REQ, sizeof(req), &req);
4588 } else {
4589 l2cap_sock_clear_timer(sk);
4590 l2cap_sock_set_timer(sk, HZ / 10);
/* incoming channel waiting on security: answer the peer */
4592 } else if (sk->sk_state == BT_CONNECT2) {
4593 struct l2cap_conn_rsp rsp;
4594 __u16 result;
4596 if (!status) {
4597 sk->sk_state = BT_CONFIG;
4598 result = L2CAP_CR_SUCCESS;
4599 } else {
4600 sk->sk_state = BT_DISCONN;
4601 l2cap_sock_set_timer(sk, HZ / 10);
4602 result = L2CAP_CR_SEC_BLOCK;
4605 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4606 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4607 rsp.result = cpu_to_le16(result);
4608 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4609 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4610 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4613 bh_unlock_sock(sk);
4616 read_unlock(&l->lock);
4618 return 0;
/* HCI callback: a chunk of ACL data arrived for this link.  L2CAP
 * frames may be fragmented across several ACL packets, so this
 * reassembles them: an ACL_START fragment carries the L2CAP header
 * (giving the total frame length) and may complete a frame on its own;
 * continuation fragments are appended to conn->rx_skb until rx_len
 * reaches zero, then the complete frame is handed to
 * l2cap_recv_frame().  Malformed sequences mark the connection
 * unreliable and drop the partial frame.  Always returns 0.
 */
4621 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4623 struct l2cap_conn *conn = hcon->l2cap_data;
4625 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4626 goto drop;
4628 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4630 if (flags & ACL_START) {
4631 struct l2cap_hdr *hdr;
4632 int len;
/* a start fragment while one is pending means we lost data */
4634 if (conn->rx_len) {
4635 BT_ERR("Unexpected start frame (len %d)", skb->len);
4636 kfree_skb(conn->rx_skb);
4637 conn->rx_skb = NULL;
4638 conn->rx_len = 0;
4639 l2cap_conn_unreliable(conn, ECOMM);
4642 if (skb->len < 2) {
4643 BT_ERR("Frame is too short (len %d)", skb->len);
4644 l2cap_conn_unreliable(conn, ECOMM);
4645 goto drop;
4648 hdr = (struct l2cap_hdr *) skb->data;
4649 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4651 if (len == skb->len) {
4652 /* Complete frame received */
4653 l2cap_recv_frame(conn, skb);
4654 return 0;
4657 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4659 if (skb->len > len) {
4660 BT_ERR("Frame is too long (len %d, expected len %d)",
4661 skb->len, len);
4662 l2cap_conn_unreliable(conn, ECOMM);
4663 goto drop;
4666 /* Allocate skb for the complete frame (with header) */
4667 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4668 if (!conn->rx_skb)
4669 goto drop;
4671 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4672 skb->len);
4673 conn->rx_len = len - skb->len;
4674 } else {
4675 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4677 if (!conn->rx_len) {
4678 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4679 l2cap_conn_unreliable(conn, ECOMM);
4680 goto drop;
4683 if (skb->len > conn->rx_len) {
4684 BT_ERR("Fragment is too long (len %d, expected %d)",
4685 skb->len, conn->rx_len);
4686 kfree_skb(conn->rx_skb);
4687 conn->rx_skb = NULL;
4688 conn->rx_len = 0;
4689 l2cap_conn_unreliable(conn, ECOMM);
4690 goto drop;
4693 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4694 skb->len);
4695 conn->rx_len -= skb->len;
4697 if (!conn->rx_len) {
4698 /* Complete frame received */
4699 l2cap_recv_frame(conn, conn->rx_skb);
4700 conn->rx_skb = NULL;
4704 drop:
4705 kfree_skb(skb);
4706 return 0;
/* seq_file show routine for /sys/kernel/debug/bluetooth/l2cap: print
 * one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) while holding the socket-list lock.
 */
4709 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4711 struct sock *sk;
4712 struct hlist_node *node;
4714 read_lock_bh(&l2cap_sk_list.lock);
4716 sk_for_each(sk, node, &l2cap_sk_list.head) {
4717 struct l2cap_pinfo *pi = l2cap_pi(sk);
4719 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4720 batostr(&bt_sk(sk)->src),
4721 batostr(&bt_sk(sk)->dst),
4722 sk->sk_state, __le16_to_cpu(pi->psm),
4723 pi->scid, pi->dcid,
4724 pi->imtu, pi->omtu, pi->sec_level);
4727 read_unlock_bh(&l2cap_sk_list.lock);
4729 return 0;
4732 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4734 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs "l2cap" entry (seq_file based). */
4737 static const struct file_operations l2cap_debugfs_fops = {
4738 .open = l2cap_debugfs_open,
4739 .read = seq_read,
4740 .llseek = seq_lseek,
4741 .release = single_release,
/* dentry of the debugfs entry, created in l2cap_init() */
4744 static struct dentry *l2cap_debugfs;
/* Socket-level operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP
 * sockets; generic bt_sock_* helpers cover poll and ioctl.
 */
4746 static const struct proto_ops l2cap_sock_ops = {
4747 .family = PF_BLUETOOTH,
4748 .owner = THIS_MODULE,
4749 .release = l2cap_sock_release,
4750 .bind = l2cap_sock_bind,
4751 .connect = l2cap_sock_connect,
4752 .listen = l2cap_sock_listen,
4753 .accept = l2cap_sock_accept,
4754 .getname = l2cap_sock_getname,
4755 .sendmsg = l2cap_sock_sendmsg,
4756 .recvmsg = l2cap_sock_recvmsg,
4757 .poll = bt_sock_poll,
4758 .ioctl = bt_sock_ioctl,
4759 .mmap = sock_no_mmap,
4760 .socketpair = sock_no_socketpair,
4761 .shutdown = l2cap_sock_shutdown,
4762 .setsockopt = l2cap_sock_setsockopt,
4763 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) reaches l2cap_sock_create().
 */
4766 static const struct net_proto_family l2cap_sock_family_ops = {
4767 .family = PF_BLUETOOTH,
4768 .owner = THIS_MODULE,
4769 .create = l2cap_sock_create,
/* Callbacks registered with the HCI core: connection lifecycle
 * events, security results, and inbound ACL data for L2CAP.
 */
4772 static struct hci_proto l2cap_hci_proto = {
4773 .name = "L2CAP",
4774 .id = HCI_PROTO_L2CAP,
4775 .connect_ind = l2cap_connect_ind,
4776 .connect_cfm = l2cap_connect_cfm,
4777 .disconn_ind = l2cap_disconn_ind,
4778 .disconn_cfm = l2cap_disconn_cfm,
4779 .security_cfm = l2cap_security_cfm,
4780 .recv_acldata = l2cap_recv_acldata
4783 static int __init l2cap_init(void)
4785 int err;
4787 err = proto_register(&l2cap_proto, 0);
4788 if (err < 0)
4789 return err;
4791 _busy_wq = create_singlethread_workqueue("l2cap");
4792 if (!_busy_wq)
4793 goto error;
4795 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4796 if (err < 0) {
4797 BT_ERR("L2CAP socket registration failed");
4798 goto error;
4801 err = hci_register_proto(&l2cap_hci_proto);
4802 if (err < 0) {
4803 BT_ERR("L2CAP protocol registration failed");
4804 bt_sock_unregister(BTPROTO_L2CAP);
4805 goto error;
4808 if (bt_debugfs) {
4809 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4810 bt_debugfs, NULL, &l2cap_debugfs_fops);
4811 if (!l2cap_debugfs)
4812 BT_ERR("Failed to create L2CAP debug file");
4815 BT_INFO("L2CAP ver %s", VERSION);
4816 BT_INFO("L2CAP socket layer initialized");
4818 return 0;
4820 error:
4821 proto_unregister(&l2cap_proto);
4822 return err;
/* Module exit: tear down in rough reverse order of l2cap_init() —
 * debugfs entry, busy workqueue (flushed first so queued work
 * finishes), socket family, HCI protocol hooks, then the proto.
 */
4825 static void __exit l2cap_exit(void)
4827 debugfs_remove(l2cap_debugfs);
4829 flush_workqueue(_busy_wq);
4830 destroy_workqueue(_busy_wq);
4832 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4833 BT_ERR("L2CAP socket unregistration failed");
4835 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4836 BT_ERR("L2CAP protocol unregistration failed");
4838 proto_unregister(&l2cap_proto);
4841 void l2cap_load(void)
4843 /* Dummy function to trigger automatic L2CAP module loading by
4844 * other modules that use L2CAP sockets but don't use any other
4845 * symbols from it. */
/* exported so a mere symbol reference pulls this module in */
4847 EXPORT_SYMBOL(l2cap_load);
/* Module entry points, parameters, and metadata. */
4849 module_init(l2cap_init);
4850 module_exit(l2cap_exit);
/* runtime-togglable switch for ERTM support */
4852 module_param(enable_ertm, bool, 0644);
4853 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4855 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4856 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4857 MODULE_VERSION(VERSION);
4858 MODULE_LICENSE("GPL");
4859 MODULE_ALIAS("bt-proto-0");