/*
 * Source: net/bluetooth/l2cap.c
 * Commit: "Bluetooth: Improve ERTM local busy handling"
 * (gitweb blob 6b839d682143555b328588320e0faae1d0db8c3e,
 *  linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git)
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP timers ---- */

/* Socket timer callback: close the channel with an error code that
 * matches the state it timed out in.  Runs in timer (softirq) context,
 * hence bh_lock_sock() rather than lock_sock(). */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill must run on the unlocked socket. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
123 struct sock *s;
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
126 break;
128 return s;
131 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
133 struct sock *s;
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
136 break;
138 return s;
141 /* Find channel with given SCID.
142 * Returns locked socket */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 struct sock *s;
146 read_lock(&l->lock);
147 s = __l2cap_get_chan_by_scid(l, cid);
148 if (s)
149 bh_lock_sock(s);
150 read_unlock(&l->lock);
151 return s;
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
156 struct sock *s;
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
159 break;
161 return s;
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 struct sock *s;
167 read_lock(&l->lock);
168 s = __l2cap_get_chan_by_ident(l, ident);
169 if (s)
170 bh_lock_sock(s);
171 read_unlock(&l->lock);
172 return s;
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
181 return cid;
184 return 0;
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
189 sock_hold(sk);
191 if (l->head)
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
196 l->head = sk;
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
204 if (sk == l->head)
205 l->head = next;
207 if (next)
208 l2cap_pi(next)->prev_c = prev;
209 if (prev)
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
213 __sock_put(sk);
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 } else {
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
244 if (parent)
245 bt_accept_enqueue(parent, sk);
248 /* Delete channel.
249 * Must be called on the locked socket. */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
259 if (conn) {
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
269 if (err)
270 sk->sk_err = err;
272 if (parent) {
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
275 } else
276 sk->sk_state_change(sk);
278 skb_queue_purge(TX_QUEUE(sk));
280 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
281 struct srej_list *l, *tmp;
283 del_timer(&l2cap_pi(sk)->retrans_timer);
284 del_timer(&l2cap_pi(sk)->monitor_timer);
285 del_timer(&l2cap_pi(sk)->ack_timer);
287 skb_queue_purge(SREJ_QUEUE(sk));
288 skb_queue_purge(BUSY_QUEUE(sk));
290 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
291 list_del(&l->list);
292 kfree(l);
297 /* Service level security */
298 static inline int l2cap_check_security(struct sock *sk)
300 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 __u8 auth_type;
303 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
304 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
305 auth_type = HCI_AT_NO_BONDING_MITM;
306 else
307 auth_type = HCI_AT_NO_BONDING;
309 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
310 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
311 } else {
312 switch (l2cap_pi(sk)->sec_level) {
313 case BT_SECURITY_HIGH:
314 auth_type = HCI_AT_GENERAL_BONDING_MITM;
315 break;
316 case BT_SECURITY_MEDIUM:
317 auth_type = HCI_AT_GENERAL_BONDING;
318 break;
319 default:
320 auth_type = HCI_AT_NO_BONDING;
321 break;
325 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
326 auth_type);
329 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 u8 id;
333 /* Get next available identificator.
334 * 1 - 128 are used by kernel.
335 * 129 - 199 are reserved.
336 * 200 - 254 are used by utilities like l2ping, etc.
339 spin_lock_bh(&conn->lock);
341 if (++conn->tx_ident > 128)
342 conn->tx_ident = 1;
344 id = conn->tx_ident;
346 spin_unlock_bh(&conn->lock);
348 return id;
351 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
353 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
355 BT_DBG("code 0x%2.2x", code);
357 if (!skb)
358 return;
360 hci_send_acl(conn->hcon, skb, 0);
363 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
365 struct sk_buff *skb;
366 struct l2cap_hdr *lh;
367 struct l2cap_conn *conn = pi->conn;
368 struct sock *sk = (struct sock *)pi;
369 int count, hlen = L2CAP_HDR_SIZE + 2;
371 if (sk->sk_state != BT_CONNECTED)
372 return;
374 if (pi->fcs == L2CAP_FCS_CRC16)
375 hlen += 2;
377 BT_DBG("pi %p, control 0x%2.2x", pi, control);
379 count = min_t(unsigned int, conn->mtu, hlen);
380 control |= L2CAP_CTRL_FRAME_TYPE;
382 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
383 control |= L2CAP_CTRL_FINAL;
384 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
387 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
388 control |= L2CAP_CTRL_POLL;
389 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
392 skb = bt_skb_alloc(count, GFP_ATOMIC);
393 if (!skb)
394 return;
396 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
397 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
398 lh->cid = cpu_to_le16(pi->dcid);
399 put_unaligned_le16(control, skb_put(skb, 2));
401 if (pi->fcs == L2CAP_FCS_CRC16) {
402 u16 fcs = crc16(0, (u8 *)lh, count - 2);
403 put_unaligned_le16(fcs, skb_put(skb, 2));
406 hci_send_acl(pi->conn->hcon, skb, 0);
409 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
411 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
412 control |= L2CAP_SUPER_RCV_NOT_READY;
413 pi->conn_state |= L2CAP_CONN_RNR_SENT;
414 } else
415 control |= L2CAP_SUPER_RCV_READY;
417 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
419 l2cap_send_sframe(pi, control);
422 static inline int __l2cap_no_conn_pending(struct sock *sk)
424 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
427 static void l2cap_do_start(struct sock *sk)
429 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
431 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
432 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 return;
435 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
436 struct l2cap_conn_req req;
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 req.psm = l2cap_pi(sk)->psm;
440 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
441 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
443 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
444 L2CAP_CONN_REQ, sizeof(req), &req);
446 } else {
447 struct l2cap_info_req req;
448 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
450 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
451 conn->info_ident = l2cap_get_ident(conn);
453 mod_timer(&conn->info_timer, jiffies +
454 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
456 l2cap_send_cmd(conn, conn->info_ident,
457 L2CAP_INFO_REQ, sizeof(req), &req);
461 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
463 u32 local_feat_mask = l2cap_feat_mask;
464 if (enable_ertm)
465 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
467 switch (mode) {
468 case L2CAP_MODE_ERTM:
469 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
470 case L2CAP_MODE_STREAMING:
471 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
472 default:
473 return 0x00;
477 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
479 struct l2cap_disconn_req req;
481 if (!conn)
482 return;
484 skb_queue_purge(TX_QUEUE(sk));
486 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
487 del_timer(&l2cap_pi(sk)->retrans_timer);
488 del_timer(&l2cap_pi(sk)->monitor_timer);
489 del_timer(&l2cap_pi(sk)->ack_timer);
492 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
493 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
494 l2cap_send_cmd(conn, l2cap_get_ident(conn),
495 L2CAP_DISCONN_REQ, sizeof(req), &req);
497 sk->sk_state = BT_DISCONN;
498 sk->sk_err = err;
501 /* ---- L2CAP connections ---- */
502 static void l2cap_conn_start(struct l2cap_conn *conn)
504 struct l2cap_chan_list *l = &conn->chan_list;
505 struct sock_del_list del, *tmp1, *tmp2;
506 struct sock *sk;
508 BT_DBG("conn %p", conn);
510 INIT_LIST_HEAD(&del.list);
512 read_lock(&l->lock);
514 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
515 bh_lock_sock(sk);
517 if (sk->sk_type != SOCK_SEQPACKET &&
518 sk->sk_type != SOCK_STREAM) {
519 bh_unlock_sock(sk);
520 continue;
523 if (sk->sk_state == BT_CONNECT) {
524 if (l2cap_check_security(sk) &&
525 __l2cap_no_conn_pending(sk)) {
526 struct l2cap_conn_req req;
528 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
529 conn->feat_mask)
530 && l2cap_pi(sk)->conf_state &
531 L2CAP_CONF_STATE2_DEVICE) {
532 tmp1 = kzalloc(sizeof(struct srej_list),
533 GFP_ATOMIC);
534 tmp1->sk = sk;
535 list_add_tail(&tmp1->list, &del.list);
536 bh_unlock_sock(sk);
537 continue;
540 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
541 req.psm = l2cap_pi(sk)->psm;
543 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
544 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
546 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
547 L2CAP_CONN_REQ, sizeof(req), &req);
549 } else if (sk->sk_state == BT_CONNECT2) {
550 struct l2cap_conn_rsp rsp;
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554 if (l2cap_check_security(sk)) {
555 if (bt_sk(sk)->defer_setup) {
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
561 } else {
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
566 } else {
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
572 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
575 bh_unlock_sock(sk);
578 read_unlock(&l->lock);
580 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
581 bh_lock_sock(tmp1->sk);
582 __l2cap_sock_close(tmp1->sk, ECONNRESET);
583 bh_unlock_sock(tmp1->sk);
584 list_del(&tmp1->list);
585 kfree(tmp1);
589 static void l2cap_conn_ready(struct l2cap_conn *conn)
591 struct l2cap_chan_list *l = &conn->chan_list;
592 struct sock *sk;
594 BT_DBG("conn %p", conn);
596 read_lock(&l->lock);
598 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
599 bh_lock_sock(sk);
601 if (sk->sk_type != SOCK_SEQPACKET &&
602 sk->sk_type != SOCK_STREAM) {
603 l2cap_sock_clear_timer(sk);
604 sk->sk_state = BT_CONNECTED;
605 sk->sk_state_change(sk);
606 } else if (sk->sk_state == BT_CONNECT)
607 l2cap_do_start(sk);
609 bh_unlock_sock(sk);
612 read_unlock(&l->lock);
615 /* Notify sockets that we cannot guaranty reliability anymore */
616 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
618 struct l2cap_chan_list *l = &conn->chan_list;
619 struct sock *sk;
621 BT_DBG("conn %p", conn);
623 read_lock(&l->lock);
625 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
626 if (l2cap_pi(sk)->force_reliable)
627 sk->sk_err = err;
630 read_unlock(&l->lock);
633 static void l2cap_info_timeout(unsigned long arg)
635 struct l2cap_conn *conn = (void *) arg;
637 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
638 conn->info_ident = 0;
640 l2cap_conn_start(conn);
643 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
645 struct l2cap_conn *conn = hcon->l2cap_data;
647 if (conn || status)
648 return conn;
650 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
651 if (!conn)
652 return NULL;
654 hcon->l2cap_data = conn;
655 conn->hcon = hcon;
657 BT_DBG("hcon %p conn %p", hcon, conn);
659 conn->mtu = hcon->hdev->acl_mtu;
660 conn->src = &hcon->hdev->bdaddr;
661 conn->dst = &hcon->dst;
663 conn->feat_mask = 0;
665 spin_lock_init(&conn->lock);
666 rwlock_init(&conn->chan_list.lock);
668 setup_timer(&conn->info_timer, l2cap_info_timeout,
669 (unsigned long) conn);
671 conn->disc_reason = 0x13;
673 return conn;
676 static void l2cap_conn_del(struct hci_conn *hcon, int err)
678 struct l2cap_conn *conn = hcon->l2cap_data;
679 struct sock *sk;
681 if (!conn)
682 return;
684 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
686 kfree_skb(conn->rx_skb);
688 /* Kill channels */
689 while ((sk = conn->chan_list.head)) {
690 bh_lock_sock(sk);
691 l2cap_chan_del(sk, err);
692 bh_unlock_sock(sk);
693 l2cap_sock_kill(sk);
696 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
697 del_timer_sync(&conn->info_timer);
699 hcon->l2cap_data = NULL;
700 kfree(conn);
703 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
705 struct l2cap_chan_list *l = &conn->chan_list;
706 write_lock_bh(&l->lock);
707 __l2cap_chan_add(conn, sk, parent);
708 write_unlock_bh(&l->lock);
711 /* ---- Socket interface ---- */
712 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
714 struct sock *sk;
715 struct hlist_node *node;
716 sk_for_each(sk, node, &l2cap_sk_list.head)
717 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
718 goto found;
719 sk = NULL;
720 found:
721 return sk;
724 /* Find socket with psm and source bdaddr.
725 * Returns closest match.
727 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
729 struct sock *sk = NULL, *sk1 = NULL;
730 struct hlist_node *node;
732 sk_for_each(sk, node, &l2cap_sk_list.head) {
733 if (state && sk->sk_state != state)
734 continue;
736 if (l2cap_pi(sk)->psm == psm) {
737 /* Exact match. */
738 if (!bacmp(&bt_sk(sk)->src, src))
739 break;
741 /* Closest match */
742 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
743 sk1 = sk;
746 return node ? sk : sk1;
749 /* Find socket with given address (psm, src).
750 * Returns locked socket */
751 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
753 struct sock *s;
754 read_lock(&l2cap_sk_list.lock);
755 s = __l2cap_get_sock_by_psm(state, psm, src);
756 if (s)
757 bh_lock_sock(s);
758 read_unlock(&l2cap_sk_list.lock);
759 return s;
762 static void l2cap_sock_destruct(struct sock *sk)
764 BT_DBG("sk %p", sk);
766 skb_queue_purge(&sk->sk_receive_queue);
767 skb_queue_purge(&sk->sk_write_queue);
770 static void l2cap_sock_cleanup_listen(struct sock *parent)
772 struct sock *sk;
774 BT_DBG("parent %p", parent);
776 /* Close not yet accepted channels */
777 while ((sk = bt_accept_dequeue(parent, NULL)))
778 l2cap_sock_close(sk);
780 parent->sk_state = BT_CLOSED;
781 sock_set_flag(parent, SOCK_ZAPPED);
784 /* Kill socket (only if zapped and orphan)
785 * Must be called on unlocked socket.
787 static void l2cap_sock_kill(struct sock *sk)
789 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
790 return;
792 BT_DBG("sk %p state %d", sk, sk->sk_state);
794 /* Kill poor orphan */
795 bt_sock_unlink(&l2cap_sk_list, sk);
796 sock_set_flag(sk, SOCK_DEAD);
797 sock_put(sk);
800 static void __l2cap_sock_close(struct sock *sk, int reason)
802 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
804 switch (sk->sk_state) {
805 case BT_LISTEN:
806 l2cap_sock_cleanup_listen(sk);
807 break;
809 case BT_CONNECTED:
810 case BT_CONFIG:
811 if (sk->sk_type == SOCK_SEQPACKET ||
812 sk->sk_type == SOCK_STREAM) {
813 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
815 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
816 l2cap_send_disconn_req(conn, sk, reason);
817 } else
818 l2cap_chan_del(sk, reason);
819 break;
821 case BT_CONNECT2:
822 if (sk->sk_type == SOCK_SEQPACKET ||
823 sk->sk_type == SOCK_STREAM) {
824 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
825 struct l2cap_conn_rsp rsp;
826 __u16 result;
828 if (bt_sk(sk)->defer_setup)
829 result = L2CAP_CR_SEC_BLOCK;
830 else
831 result = L2CAP_CR_BAD_PSM;
833 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
834 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
835 rsp.result = cpu_to_le16(result);
836 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
837 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
838 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
839 } else
840 l2cap_chan_del(sk, reason);
841 break;
843 case BT_CONNECT:
844 case BT_DISCONN:
845 l2cap_chan_del(sk, reason);
846 break;
848 default:
849 sock_set_flag(sk, SOCK_ZAPPED);
850 break;
854 /* Must be called on unlocked socket. */
855 static void l2cap_sock_close(struct sock *sk)
857 l2cap_sock_clear_timer(sk);
858 lock_sock(sk);
859 __l2cap_sock_close(sk, ECONNRESET);
860 release_sock(sk);
861 l2cap_sock_kill(sk);
864 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
866 struct l2cap_pinfo *pi = l2cap_pi(sk);
868 BT_DBG("sk %p", sk);
870 if (parent) {
871 sk->sk_type = parent->sk_type;
872 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
874 pi->imtu = l2cap_pi(parent)->imtu;
875 pi->omtu = l2cap_pi(parent)->omtu;
876 pi->conf_state = l2cap_pi(parent)->conf_state;
877 pi->mode = l2cap_pi(parent)->mode;
878 pi->fcs = l2cap_pi(parent)->fcs;
879 pi->max_tx = l2cap_pi(parent)->max_tx;
880 pi->tx_win = l2cap_pi(parent)->tx_win;
881 pi->sec_level = l2cap_pi(parent)->sec_level;
882 pi->role_switch = l2cap_pi(parent)->role_switch;
883 pi->force_reliable = l2cap_pi(parent)->force_reliable;
884 } else {
885 pi->imtu = L2CAP_DEFAULT_MTU;
886 pi->omtu = 0;
887 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
888 pi->mode = L2CAP_MODE_ERTM;
889 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
890 } else {
891 pi->mode = L2CAP_MODE_BASIC;
893 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
894 pi->fcs = L2CAP_FCS_CRC16;
895 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
896 pi->sec_level = BT_SECURITY_LOW;
897 pi->role_switch = 0;
898 pi->force_reliable = 0;
901 /* Default config options */
902 pi->conf_len = 0;
903 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
904 skb_queue_head_init(TX_QUEUE(sk));
905 skb_queue_head_init(SREJ_QUEUE(sk));
906 skb_queue_head_init(BUSY_QUEUE(sk));
907 INIT_LIST_HEAD(SREJ_LIST(sk));
910 static struct proto l2cap_proto = {
911 .name = "L2CAP",
912 .owner = THIS_MODULE,
913 .obj_size = sizeof(struct l2cap_pinfo)
916 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
918 struct sock *sk;
920 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
921 if (!sk)
922 return NULL;
924 sock_init_data(sock, sk);
925 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
927 sk->sk_destruct = l2cap_sock_destruct;
928 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
930 sock_reset_flag(sk, SOCK_ZAPPED);
932 sk->sk_protocol = proto;
933 sk->sk_state = BT_OPEN;
935 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
937 bt_sock_link(&l2cap_sk_list, sk);
938 return sk;
941 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
942 int kern)
944 struct sock *sk;
946 BT_DBG("sock %p", sock);
948 sock->state = SS_UNCONNECTED;
950 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
951 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
952 return -ESOCKTNOSUPPORT;
954 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
955 return -EPERM;
957 sock->ops = &l2cap_sock_ops;
959 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
960 if (!sk)
961 return -ENOMEM;
963 l2cap_sock_init(sk, NULL);
964 return 0;
967 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
969 struct sock *sk = sock->sk;
970 struct sockaddr_l2 la;
971 int len, err = 0;
973 BT_DBG("sk %p", sk);
975 if (!addr || addr->sa_family != AF_BLUETOOTH)
976 return -EINVAL;
978 memset(&la, 0, sizeof(la));
979 len = min_t(unsigned int, sizeof(la), alen);
980 memcpy(&la, addr, len);
982 if (la.l2_cid)
983 return -EINVAL;
985 lock_sock(sk);
987 if (sk->sk_state != BT_OPEN) {
988 err = -EBADFD;
989 goto done;
992 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
993 !capable(CAP_NET_BIND_SERVICE)) {
994 err = -EACCES;
995 goto done;
998 write_lock_bh(&l2cap_sk_list.lock);
1000 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1001 err = -EADDRINUSE;
1002 } else {
1003 /* Save source address */
1004 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1005 l2cap_pi(sk)->psm = la.l2_psm;
1006 l2cap_pi(sk)->sport = la.l2_psm;
1007 sk->sk_state = BT_BOUND;
1009 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1010 __le16_to_cpu(la.l2_psm) == 0x0003)
1011 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1014 write_unlock_bh(&l2cap_sk_list.lock);
1016 done:
1017 release_sock(sk);
1018 return err;
1021 static int l2cap_do_connect(struct sock *sk)
1023 bdaddr_t *src = &bt_sk(sk)->src;
1024 bdaddr_t *dst = &bt_sk(sk)->dst;
1025 struct l2cap_conn *conn;
1026 struct hci_conn *hcon;
1027 struct hci_dev *hdev;
1028 __u8 auth_type;
1029 int err;
1031 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1032 l2cap_pi(sk)->psm);
1034 hdev = hci_get_route(dst, src);
1035 if (!hdev)
1036 return -EHOSTUNREACH;
1038 hci_dev_lock_bh(hdev);
1040 err = -ENOMEM;
1042 if (sk->sk_type == SOCK_RAW) {
1043 switch (l2cap_pi(sk)->sec_level) {
1044 case BT_SECURITY_HIGH:
1045 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1046 break;
1047 case BT_SECURITY_MEDIUM:
1048 auth_type = HCI_AT_DEDICATED_BONDING;
1049 break;
1050 default:
1051 auth_type = HCI_AT_NO_BONDING;
1052 break;
1054 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1055 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1056 auth_type = HCI_AT_NO_BONDING_MITM;
1057 else
1058 auth_type = HCI_AT_NO_BONDING;
1060 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1061 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1062 } else {
1063 switch (l2cap_pi(sk)->sec_level) {
1064 case BT_SECURITY_HIGH:
1065 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1066 break;
1067 case BT_SECURITY_MEDIUM:
1068 auth_type = HCI_AT_GENERAL_BONDING;
1069 break;
1070 default:
1071 auth_type = HCI_AT_NO_BONDING;
1072 break;
1076 hcon = hci_connect(hdev, ACL_LINK, dst,
1077 l2cap_pi(sk)->sec_level, auth_type);
1078 if (!hcon)
1079 goto done;
1081 conn = l2cap_conn_add(hcon, 0);
1082 if (!conn) {
1083 hci_conn_put(hcon);
1084 goto done;
1087 err = 0;
1089 /* Update source addr of the socket */
1090 bacpy(src, conn->src);
1092 l2cap_chan_add(conn, sk, NULL);
1094 sk->sk_state = BT_CONNECT;
1095 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1097 if (hcon->state == BT_CONNECTED) {
1098 if (sk->sk_type != SOCK_SEQPACKET &&
1099 sk->sk_type != SOCK_STREAM) {
1100 l2cap_sock_clear_timer(sk);
1101 sk->sk_state = BT_CONNECTED;
1102 } else
1103 l2cap_do_start(sk);
1106 done:
1107 hci_dev_unlock_bh(hdev);
1108 hci_dev_put(hdev);
1109 return err;
1112 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1114 struct sock *sk = sock->sk;
1115 struct sockaddr_l2 la;
1116 int len, err = 0;
1118 BT_DBG("sk %p", sk);
1120 if (!addr || alen < sizeof(addr->sa_family) ||
1121 addr->sa_family != AF_BLUETOOTH)
1122 return -EINVAL;
1124 memset(&la, 0, sizeof(la));
1125 len = min_t(unsigned int, sizeof(la), alen);
1126 memcpy(&la, addr, len);
1128 if (la.l2_cid)
1129 return -EINVAL;
1131 lock_sock(sk);
1133 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1134 && !la.l2_psm) {
1135 err = -EINVAL;
1136 goto done;
1139 switch (l2cap_pi(sk)->mode) {
1140 case L2CAP_MODE_BASIC:
1141 break;
1142 case L2CAP_MODE_ERTM:
1143 case L2CAP_MODE_STREAMING:
1144 if (enable_ertm)
1145 break;
1146 /* fall through */
1147 default:
1148 err = -ENOTSUPP;
1149 goto done;
1152 switch (sk->sk_state) {
1153 case BT_CONNECT:
1154 case BT_CONNECT2:
1155 case BT_CONFIG:
1156 /* Already connecting */
1157 goto wait;
1159 case BT_CONNECTED:
1160 /* Already connected */
1161 goto done;
1163 case BT_OPEN:
1164 case BT_BOUND:
1165 /* Can connect */
1166 break;
1168 default:
1169 err = -EBADFD;
1170 goto done;
1173 /* Set destination address and psm */
1174 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1175 l2cap_pi(sk)->psm = la.l2_psm;
1177 err = l2cap_do_connect(sk);
1178 if (err)
1179 goto done;
1181 wait:
1182 err = bt_sock_wait_state(sk, BT_CONNECTED,
1183 sock_sndtimeo(sk, flags & O_NONBLOCK));
1184 done:
1185 release_sock(sk);
1186 return err;
1189 static int l2cap_sock_listen(struct socket *sock, int backlog)
1191 struct sock *sk = sock->sk;
1192 int err = 0;
1194 BT_DBG("sk %p backlog %d", sk, backlog);
1196 lock_sock(sk);
1198 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1199 || sk->sk_state != BT_BOUND) {
1200 err = -EBADFD;
1201 goto done;
1204 switch (l2cap_pi(sk)->mode) {
1205 case L2CAP_MODE_BASIC:
1206 break;
1207 case L2CAP_MODE_ERTM:
1208 case L2CAP_MODE_STREAMING:
1209 if (enable_ertm)
1210 break;
1211 /* fall through */
1212 default:
1213 err = -ENOTSUPP;
1214 goto done;
1217 if (!l2cap_pi(sk)->psm) {
1218 bdaddr_t *src = &bt_sk(sk)->src;
1219 u16 psm;
1221 err = -EINVAL;
1223 write_lock_bh(&l2cap_sk_list.lock);
1225 for (psm = 0x1001; psm < 0x1100; psm += 2)
1226 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1227 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1228 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1229 err = 0;
1230 break;
1233 write_unlock_bh(&l2cap_sk_list.lock);
1235 if (err < 0)
1236 goto done;
1239 sk->sk_max_ack_backlog = backlog;
1240 sk->sk_ack_backlog = 0;
1241 sk->sk_state = BT_LISTEN;
1243 done:
1244 release_sock(sk);
1245 return err;
1248 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1250 DECLARE_WAITQUEUE(wait, current);
1251 struct sock *sk = sock->sk, *nsk;
1252 long timeo;
1253 int err = 0;
1255 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1257 if (sk->sk_state != BT_LISTEN) {
1258 err = -EBADFD;
1259 goto done;
1262 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1264 BT_DBG("sk %p timeo %ld", sk, timeo);
1266 /* Wait for an incoming connection. (wake-one). */
1267 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1268 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1269 set_current_state(TASK_INTERRUPTIBLE);
1270 if (!timeo) {
1271 err = -EAGAIN;
1272 break;
1275 release_sock(sk);
1276 timeo = schedule_timeout(timeo);
1277 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1279 if (sk->sk_state != BT_LISTEN) {
1280 err = -EBADFD;
1281 break;
1284 if (signal_pending(current)) {
1285 err = sock_intr_errno(timeo);
1286 break;
1289 set_current_state(TASK_RUNNING);
1290 remove_wait_queue(sk_sleep(sk), &wait);
1292 if (err)
1293 goto done;
1295 newsock->state = SS_CONNECTED;
1297 BT_DBG("new socket %p", nsk);
1299 done:
1300 release_sock(sk);
1301 return err;
1304 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1306 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1307 struct sock *sk = sock->sk;
1309 BT_DBG("sock %p, sk %p", sock, sk);
1311 addr->sa_family = AF_BLUETOOTH;
1312 *len = sizeof(struct sockaddr_l2);
1314 if (peer) {
1315 la->l2_psm = l2cap_pi(sk)->psm;
1316 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1317 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1318 } else {
1319 la->l2_psm = l2cap_pi(sk)->sport;
1320 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1321 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1324 return 0;
/* Wait (in HZ/5 slices) until every transmitted ERTM I-frame has been
 * acknowledged, or the connection goes away / a signal or socket error
 * interrupts the wait. Called with the socket lock held; it is dropped
 * while sleeping.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice if the previous one fully elapsed. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
/* ERTM monitor timer: the remote did not answer our poll. Either give up
 * (retry budget exhausted -> disconnect) or poll again with an RR/RNR
 * S-frame carrying the P bit. Runs in timer (softirq) context.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		/* Peer unresponsive beyond MaxTransmit: tear the channel down. */
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
/* ERTM retransmission timer: no ack arrived in time. Start the poll
 * sequence: send RR/RNR with the P bit, enter the WAIT_F state and hand
 * further supervision to the monitor timer. Runs in timer context.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* We now expect a frame with the F bit set in response to our poll. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
/* Free all frames at the head of the TX queue that the peer has
 * acknowledged, i.e. everything older than expected_ack_seq. Stops the
 * retransmission timer once nothing is outstanding.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First still-unacked frame reached: stop dropping. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1413 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1415 struct l2cap_pinfo *pi = l2cap_pi(sk);
1417 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1419 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: stamp each queued I-frame with the next TX
 * sequence number (and FCS if enabled) and send it. Streaming mode keeps
 * no retransmission state, so frames are dequeued and freed immediately.
 */
static int l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = sk->sk_send_head)) {
		/* NOTE(review): skb_clone(GFP_ATOMIC) can fail; tx_skb is
		 * dereferenced without a NULL check — confirm/fix upstream. */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own trailing 2 bytes. */
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		/* TxSeq is modulo 64 per the ERTM/streaming control field. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);
	}
	return 0;
}
1455 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1457 struct l2cap_pinfo *pi = l2cap_pi(sk);
1458 struct sk_buff *skb, *tx_skb;
1459 u16 control, fcs;
1461 skb = skb_peek(TX_QUEUE(sk));
1462 if (!skb)
1463 return;
1465 do {
1466 if (bt_cb(skb)->tx_seq == tx_seq)
1467 break;
1469 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1470 return;
1472 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1474 if (pi->remote_max_tx &&
1475 bt_cb(skb)->retries == pi->remote_max_tx) {
1476 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1477 return;
1480 tx_skb = skb_clone(skb, GFP_ATOMIC);
1481 bt_cb(skb)->retries++;
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1484 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1485 control |= L2CAP_CTRL_FINAL;
1486 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1492 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
1495 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1499 l2cap_do_send(sk, tx_skb);
1502 static int l2cap_ertm_send(struct sock *sk)
1504 struct sk_buff *skb, *tx_skb;
1505 struct l2cap_pinfo *pi = l2cap_pi(sk);
1506 u16 control, fcs;
1507 int nsent = 0;
1509 if (sk->sk_state != BT_CONNECTED)
1510 return -ENOTCONN;
1512 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1514 if (pi->remote_max_tx &&
1515 bt_cb(skb)->retries == pi->remote_max_tx) {
1516 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1517 break;
1520 tx_skb = skb_clone(skb, GFP_ATOMIC);
1522 bt_cb(skb)->retries++;
1524 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1525 control &= L2CAP_CTRL_SAR;
1527 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1528 control |= L2CAP_CTRL_FINAL;
1529 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1531 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1532 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1533 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1536 if (pi->fcs == L2CAP_FCS_CRC16) {
1537 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1538 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1541 l2cap_do_send(sk, tx_skb);
1543 __mod_retrans_timer();
1545 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1546 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1548 pi->unacked_frames++;
1549 pi->frames_sent++;
1551 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1552 sk->sk_send_head = NULL;
1553 else
1554 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1556 nsent++;
1559 return nsent;
1562 static int l2cap_retransmit_frames(struct sock *sk)
1564 struct l2cap_pinfo *pi = l2cap_pi(sk);
1565 int ret;
1567 if (!skb_queue_empty(TX_QUEUE(sk)))
1568 sk->sk_send_head = TX_QUEUE(sk)->next;
1570 pi->next_tx_seq = pi->expected_ack_seq;
1571 ret = l2cap_ertm_send(sk);
1572 return ret;
/* Acknowledge received I-frames. If we are locally busy, send RNR (and
 * remember we did). Otherwise prefer piggybacking the ack on pending
 * I-frames; only if nothing was sent fall back to an explicit RR.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending any acks implicitly. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1596 static void l2cap_send_srejtail(struct sock *sk)
1598 struct srej_list *tail;
1599 u16 control;
1601 control = L2CAP_SUPER_SELECT_REJECT;
1602 control |= L2CAP_CTRL_FINAL;
1604 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1605 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1607 l2cap_send_sframe(l2cap_pi(sk), control);
1610 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1612 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1613 struct sk_buff **frag;
1614 int err, sent = 0;
1616 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1617 return -EFAULT;
1619 sent += count;
1620 len -= count;
1622 /* Continuation fragments (no L2CAP header) */
1623 frag = &skb_shinfo(skb)->frag_list;
1624 while (len) {
1625 count = min_t(unsigned int, conn->mtu, len);
1627 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1628 if (!*frag)
1629 return -EFAULT;
1630 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1631 return -EFAULT;
1633 sent += count;
1634 len -= count;
1636 frag = &(*frag)->next;
1639 return sent;
1642 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1644 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1645 struct sk_buff *skb;
1646 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1647 struct l2cap_hdr *lh;
1649 BT_DBG("sk %p len %d", sk, (int)len);
1651 count = min_t(unsigned int, (conn->mtu - hlen), len);
1652 skb = bt_skb_send_alloc(sk, count + hlen,
1653 msg->msg_flags & MSG_DONTWAIT, &err);
1654 if (!skb)
1655 return ERR_PTR(-ENOMEM);
1657 /* Create L2CAP header */
1658 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1659 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1660 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1661 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1663 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1664 if (unlikely(err < 0)) {
1665 kfree_skb(skb);
1666 return ERR_PTR(err);
1668 return skb;
1671 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1673 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1674 struct sk_buff *skb;
1675 int err, count, hlen = L2CAP_HDR_SIZE;
1676 struct l2cap_hdr *lh;
1678 BT_DBG("sk %p len %d", sk, (int)len);
1680 count = min_t(unsigned int, (conn->mtu - hlen), len);
1681 skb = bt_skb_send_alloc(sk, count + hlen,
1682 msg->msg_flags & MSG_DONTWAIT, &err);
1683 if (!skb)
1684 return ERR_PTR(-ENOMEM);
1686 /* Create L2CAP header */
1687 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1688 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1689 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1691 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1692 if (unlikely(err < 0)) {
1693 kfree_skb(skb);
1694 return ERR_PTR(err);
1696 return skb;
/* Build an ERTM/streaming I-frame PDU: header, 2-byte control field,
 * optional 2-byte SDU length (first frame of a segmented SDU), payload,
 * and a 2-byte FCS placeholder (filled in at transmit time).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SDU length field only present on the START frame of a segmented SDU. */
	if (sdulen)
		hlen += 2;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve space for the FCS; the real value is computed on send. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than the remote MPS into a START frame (carrying
 * the total SDU length), CONTINUE frames, and a final END frame. Frames
 * are built on a private queue first so a mid-way failure leaves the TX
 * queue untouched, then spliced onto it. Returns bytes queued or errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* START frame: MPS-sized payload, sdulen = total SDU length. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far; nothing was queued yet. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1790 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1792 struct sock *sk = sock->sk;
1793 struct l2cap_pinfo *pi = l2cap_pi(sk);
1794 struct sk_buff *skb;
1795 u16 control;
1796 int err;
1798 BT_DBG("sock %p, sk %p", sock, sk);
1800 err = sock_error(sk);
1801 if (err)
1802 return err;
1804 if (msg->msg_flags & MSG_OOB)
1805 return -EOPNOTSUPP;
1807 lock_sock(sk);
1809 if (sk->sk_state != BT_CONNECTED) {
1810 err = -ENOTCONN;
1811 goto done;
1814 /* Connectionless channel */
1815 if (sk->sk_type == SOCK_DGRAM) {
1816 skb = l2cap_create_connless_pdu(sk, msg, len);
1817 if (IS_ERR(skb)) {
1818 err = PTR_ERR(skb);
1819 } else {
1820 l2cap_do_send(sk, skb);
1821 err = len;
1823 goto done;
1826 switch (pi->mode) {
1827 case L2CAP_MODE_BASIC:
1828 /* Check outgoing MTU */
1829 if (len > pi->omtu) {
1830 err = -EINVAL;
1831 goto done;
1834 /* Create a basic PDU */
1835 skb = l2cap_create_basic_pdu(sk, msg, len);
1836 if (IS_ERR(skb)) {
1837 err = PTR_ERR(skb);
1838 goto done;
1841 l2cap_do_send(sk, skb);
1842 err = len;
1843 break;
1845 case L2CAP_MODE_ERTM:
1846 case L2CAP_MODE_STREAMING:
1847 /* Entire SDU fits into one PDU */
1848 if (len <= pi->remote_mps) {
1849 control = L2CAP_SDU_UNSEGMENTED;
1850 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1851 if (IS_ERR(skb)) {
1852 err = PTR_ERR(skb);
1853 goto done;
1855 __skb_queue_tail(TX_QUEUE(sk), skb);
1857 if (sk->sk_send_head == NULL)
1858 sk->sk_send_head = skb;
1860 } else {
1861 /* Segment SDU into multiples PDUs */
1862 err = l2cap_sar_segment_sdu(sk, msg, len);
1863 if (err < 0)
1864 goto done;
1867 if (pi->mode == L2CAP_MODE_STREAMING) {
1868 err = l2cap_streaming_send(sk);
1869 } else {
1870 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1871 pi->conn_state && L2CAP_CONN_WAIT_F) {
1872 err = len;
1873 break;
1875 err = l2cap_ertm_send(sk);
1878 if (err >= 0)
1879 err = len;
1880 break;
1882 default:
1883 BT_DBG("bad state %1.1x", pi->mode);
1884 err = -EINVAL;
1887 done:
1888 release_sock(sk);
1889 return err;
/* recvmsg() for L2CAP sockets. With deferred setup, the first read on a
 * BT_CONNECT2 socket acknowledges the pending connection request by
 * sending the success connect response; ordinary reads go through
 * bt_sock_recvmsg().
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM). */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Preload current values so a short copy_from_user leaves
		 * unspecified fields unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): on an invalid mode err is -EINVAL but the
		 * remaining options below are still applied — confirm whether
		 * this partial update is intended. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode flag wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1999 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2001 struct sock *sk = sock->sk;
2002 struct bt_security sec;
2003 int len, err = 0;
2004 u32 opt;
2006 BT_DBG("sk %p", sk);
2008 if (level == SOL_L2CAP)
2009 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2011 if (level != SOL_BLUETOOTH)
2012 return -ENOPROTOOPT;
2014 lock_sock(sk);
2016 switch (optname) {
2017 case BT_SECURITY:
2018 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2019 && sk->sk_type != SOCK_RAW) {
2020 err = -EINVAL;
2021 break;
2024 sec.level = BT_SECURITY_LOW;
2026 len = min_t(unsigned int, sizeof(sec), optlen);
2027 if (copy_from_user((char *) &sec, optval, len)) {
2028 err = -EFAULT;
2029 break;
2032 if (sec.level < BT_SECURITY_LOW ||
2033 sec.level > BT_SECURITY_HIGH) {
2034 err = -EINVAL;
2035 break;
2038 l2cap_pi(sk)->sec_level = sec.level;
2039 break;
2041 case BT_DEFER_SETUP:
2042 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2043 err = -EINVAL;
2044 break;
2047 if (get_user(opt, (u32 __user *) optval)) {
2048 err = -EFAULT;
2049 break;
2052 bt_sk(sk)->defer_setup = opt;
2053 break;
2055 default:
2056 err = -ENOPROTOOPT;
2057 break;
2060 release_sock(sk);
2061 return err;
2064 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2066 struct sock *sk = sock->sk;
2067 struct l2cap_options opts;
2068 struct l2cap_conninfo cinfo;
2069 int len, err = 0;
2070 u32 opt;
2072 BT_DBG("sk %p", sk);
2074 if (get_user(len, optlen))
2075 return -EFAULT;
2077 lock_sock(sk);
2079 switch (optname) {
2080 case L2CAP_OPTIONS:
2081 opts.imtu = l2cap_pi(sk)->imtu;
2082 opts.omtu = l2cap_pi(sk)->omtu;
2083 opts.flush_to = l2cap_pi(sk)->flush_to;
2084 opts.mode = l2cap_pi(sk)->mode;
2085 opts.fcs = l2cap_pi(sk)->fcs;
2086 opts.max_tx = l2cap_pi(sk)->max_tx;
2087 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2089 len = min_t(unsigned int, len, sizeof(opts));
2090 if (copy_to_user(optval, (char *) &opts, len))
2091 err = -EFAULT;
2093 break;
2095 case L2CAP_LM:
2096 switch (l2cap_pi(sk)->sec_level) {
2097 case BT_SECURITY_LOW:
2098 opt = L2CAP_LM_AUTH;
2099 break;
2100 case BT_SECURITY_MEDIUM:
2101 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2102 break;
2103 case BT_SECURITY_HIGH:
2104 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2105 L2CAP_LM_SECURE;
2106 break;
2107 default:
2108 opt = 0;
2109 break;
2112 if (l2cap_pi(sk)->role_switch)
2113 opt |= L2CAP_LM_MASTER;
2115 if (l2cap_pi(sk)->force_reliable)
2116 opt |= L2CAP_LM_RELIABLE;
2118 if (put_user(opt, (u32 __user *) optval))
2119 err = -EFAULT;
2120 break;
2122 case L2CAP_CONNINFO:
2123 if (sk->sk_state != BT_CONNECTED &&
2124 !(sk->sk_state == BT_CONNECT2 &&
2125 bt_sk(sk)->defer_setup)) {
2126 err = -ENOTCONN;
2127 break;
2130 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2131 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2133 len = min_t(unsigned int, len, sizeof(cinfo));
2134 if (copy_to_user(optval, (char *) &cinfo, len))
2135 err = -EFAULT;
2137 break;
2139 default:
2140 err = -ENOPROTOOPT;
2141 break;
2144 release_sock(sk);
2145 return err;
2148 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2150 struct sock *sk = sock->sk;
2151 struct bt_security sec;
2152 int len, err = 0;
2154 BT_DBG("sk %p", sk);
2156 if (level == SOL_L2CAP)
2157 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2159 if (level != SOL_BLUETOOTH)
2160 return -ENOPROTOOPT;
2162 if (get_user(len, optlen))
2163 return -EFAULT;
2165 lock_sock(sk);
2167 switch (optname) {
2168 case BT_SECURITY:
2169 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2170 && sk->sk_type != SOCK_RAW) {
2171 err = -EINVAL;
2172 break;
2175 sec.level = l2cap_pi(sk)->sec_level;
2177 len = min_t(unsigned int, len, sizeof(sec));
2178 if (copy_to_user(optval, (char *) &sec, len))
2179 err = -EFAULT;
2181 break;
2183 case BT_DEFER_SETUP:
2184 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2185 err = -EINVAL;
2186 break;
2189 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2190 err = -EFAULT;
2192 break;
2194 default:
2195 err = -ENOPROTOOPT;
2196 break;
2199 release_sock(sk);
2200 return err;
/* shutdown() for L2CAP sockets. For ERTM channels first wait for all
 * outstanding frames to be acked, then close the channel; honour
 * SO_LINGER by waiting for BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Drain the ERTM TX window before tearing anything down. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2234 static int l2cap_sock_release(struct socket *sock)
2236 struct sock *sk = sock->sk;
2237 int err;
2239 BT_DBG("sock %p, sk %p", sock, sk);
2241 if (!sk)
2242 return 0;
2244 err = l2cap_sock_shutdown(sock, 2);
2246 sock_orphan(sk);
2247 l2cap_sock_kill(sk);
2248 return err;
2251 static void l2cap_chan_ready(struct sock *sk)
2253 struct sock *parent = bt_sk(sk)->parent;
2255 BT_DBG("sk %p, parent %p", sk, parent);
2257 l2cap_pi(sk)->conf_state = 0;
2258 l2cap_sock_clear_timer(sk);
2260 if (!parent) {
2261 /* Outgoing channel.
2262 * Wake up socket sleeping on connect.
2264 sk->sk_state = BT_CONNECTED;
2265 sk->sk_state_change(sk);
2266 } else {
2267 /* Incoming channel.
2268 * Wake up socket sleeping on accept.
2270 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Each raw socket gets its own clone; skip on OOM. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() fails if the rcv buffer is full. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2301 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb: L2CAP header (signalling CID),
 * command header, and the command payload — split into ACL-MTU-sized
 * continuation fragments on frag_list when needed.
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first fragment with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb() frees the frag_list chain built so far as well. */
	kfree_skb(skb);
	return NULL;
}
/* Parse one configuration option at *ptr: report its type and length,
 * decode 1/2/4-byte values to host order into *val (larger options are
 * returned as a pointer cast into *val), and advance *ptr past it.
 * Returns the total encoded length consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;	/* void* arithmetic: GCC extension, byte-granular */

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer to the data. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr: 1/2/4-byte values are stored
 * little-endian; anything else is memcpy'd from the pointer passed in
 * val. Advances *ptr past the encoded option.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* val is really a pointer to the option payload here. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Delayed-ack timer: acknowledge received I-frames now. Runs in timer
 * (softirq) context, hence bh_lock_sock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
/* Initialise ERTM state for a freshly configured channel: reset sequence
 * counters, arm the three ERTM timers, set up the SREJ/busy queues and
 * the local-busy work item, and install the ERTM backlog receive hook.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2456 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2458 switch (mode) {
2459 case L2CAP_MODE_STREAMING:
2460 case L2CAP_MODE_ERTM:
2461 if (l2cap_mode_supported(mode, remote_feat_mask))
2462 return mode;
2463 /* fall through */
2464 default:
2465 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': on the first
 * request possibly downgrade the channel mode based on the remote
 * feature mask, then emit MTU, RFC and (optionally) FCS options.
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the very first exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode was pinned via sockopt. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap MPS so header+control+FCS still fit the ACL MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's configuration request (stored in pi->conf_req) and
 * build our response into 'data': accept/adjust MTU, mode, RFC and FCS
 * options, record unknown non-hint options, and possibly renegotiate the
 * channel mode. Returns bytes written or -ECONNREFUSED.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be understood. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			/* val is a pointer for variable-length options. */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back in the response. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* Mode pinned via sockopt: refuse a mismatching peer. */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Give up after one failed renegotiation round. */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			/* NOTE(review): rfc.max_pdu_size is __le16 but is
			 * compared/assigned with host-order values via
			 * le16_to_cpu on host values — only correct on
			 * little-endian; should be cpu_to_le16. Confirm. */
			if (rfc.max_pdu_size > pi->conn->mtu - 10)
				rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (rfc.max_pdu_size > pi->conn->mtu - 10)
				rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2715 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2717 struct l2cap_pinfo *pi = l2cap_pi(sk);
2718 struct l2cap_conf_req *req = data;
2719 void *ptr = req->data;
2720 int type, olen;
2721 unsigned long val;
2722 struct l2cap_conf_rfc rfc;
2724 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2726 while (len >= L2CAP_CONF_OPT_SIZE) {
2727 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2729 switch (type) {
2730 case L2CAP_CONF_MTU:
2731 if (val < L2CAP_DEFAULT_MIN_MTU) {
2732 *result = L2CAP_CONF_UNACCEPT;
2733 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2734 } else
2735 pi->omtu = val;
2736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2737 break;
2739 case L2CAP_CONF_FLUSH_TO:
2740 pi->flush_to = val;
2741 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2742 2, pi->flush_to);
2743 break;
2745 case L2CAP_CONF_RFC:
2746 if (olen == sizeof(rfc))
2747 memcpy(&rfc, (void *)val, olen);
2749 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2750 rfc.mode != pi->mode)
2751 return -ECONNREFUSED;
2753 pi->fcs = 0;
2755 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2756 sizeof(rfc), (unsigned long) &rfc);
2757 break;
2761 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2762 return -ECONNREFUSED;
2764 pi->mode = rfc.mode;
2766 if (*result == L2CAP_CONF_SUCCESS) {
2767 switch (rfc.mode) {
2768 case L2CAP_MODE_ERTM:
2769 pi->remote_tx_win = rfc.txwin_size;
2770 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2771 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2772 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2773 break;
2774 case L2CAP_MODE_STREAMING:
2775 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2779 req->dcid = cpu_to_le16(pi->dcid);
2780 req->flags = cpu_to_le16(0x0000);
2782 return ptr - data;
2785 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2787 struct l2cap_conf_rsp *rsp = data;
2788 void *ptr = rsp->data;
2790 BT_DBG("sk %p", sk);
2792 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2793 rsp->result = cpu_to_le16(result);
2794 rsp->flags = cpu_to_le16(flags);
2796 return ptr - data;
2799 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2801 struct l2cap_pinfo *pi = l2cap_pi(sk);
2802 int type, olen;
2803 unsigned long val;
2804 struct l2cap_conf_rfc rfc;
2806 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2808 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2809 return;
2811 while (len >= L2CAP_CONF_OPT_SIZE) {
2812 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2814 switch (type) {
2815 case L2CAP_CONF_RFC:
2816 if (olen == sizeof(rfc))
2817 memcpy(&rfc, (void *)val, olen);
2818 goto done;
2822 done:
2823 switch (rfc.mode) {
2824 case L2CAP_MODE_ERTM:
2825 pi->remote_tx_win = rfc.txwin_size;
2826 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2827 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2828 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2829 break;
2830 case L2CAP_MODE_STREAMING:
2831 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2835 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2837 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2839 if (rej->reason != 0x0000)
2840 return 0;
2842 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2843 cmd->ident == conn->info_ident) {
2844 del_timer(&conn->info_timer);
2846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2847 conn->info_ident = 0;
2849 l2cap_conn_start(conn);
2852 return 0;
/* Handle an incoming L2CAP Connection Request: locate a listening
 * socket for the requested PSM, allocate a child socket, add it to the
 * connection's channel list and answer with a Connection Response
 * (success, pending, or a rejection code).  Always returns 0; errors
 * are reported to the peer via rsp.result.
 */
2855 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2857 struct l2cap_chan_list *list = &conn->chan_list;
2858 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2859 struct l2cap_conn_rsp rsp;
2860 struct sock *sk, *parent;
2861 int result, status = L2CAP_CS_NO_INFO;
2863 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2864 __le16 psm = req->psm;
2866 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2868 /* Check if we have socket listening on psm */
2869 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2870 if (!parent) {
2871 result = L2CAP_CR_BAD_PSM;
2872 goto sendresp;
/* NOTE(review): bh_unlock_sock(parent) at the response label implies
 * the parent socket is bh-locked somewhere after this lookup; the
 * matching bh_lock_sock is not visible in this chunk — confirm. */
2875 /* Check if the ACL is secure enough (if not SDP) */
2876 if (psm != cpu_to_le16(0x0001) &&
2877 !hci_conn_check_link_mode(conn->hcon)) {
2878 conn->disc_reason = 0x05;
2879 result = L2CAP_CR_SEC_BLOCK;
2880 goto response;
/* From here on every failure is reported as "no resources". */
2883 result = L2CAP_CR_NO_MEM;
2885 /* Check for backlog size */
2886 if (sk_acceptq_is_full(parent)) {
2887 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2888 goto response;
/* GFP_ATOMIC: signalling runs in bottom-half context. */
2891 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2892 if (!sk)
2893 goto response;
2895 write_lock_bh(&list->lock);
2897 /* Check if we already have channel with that dcid */
2898 if (__l2cap_get_chan_by_dcid(list, scid)) {
2899 write_unlock_bh(&list->lock);
2900 sock_set_flag(sk, SOCK_ZAPPED);
2901 l2cap_sock_kill(sk);
2902 goto response;
/* Pin the ACL while this channel exists. */
2905 hci_conn_hold(conn->hcon);
2907 l2cap_sock_init(sk, parent);
2908 bacpy(&bt_sk(sk)->src, conn->src);
2909 bacpy(&bt_sk(sk)->dst, conn->dst);
2910 l2cap_pi(sk)->psm = psm;
2911 l2cap_pi(sk)->dcid = scid;
2913 __l2cap_chan_add(conn, sk, parent);
2914 dcid = l2cap_pi(sk)->scid;
2916 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2918 l2cap_pi(sk)->ident = cmd->ident;
/* Decide between immediate success, authorization-pending (deferred
 * setup), authentication-pending, or info-exchange-pending. */
2920 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2921 if (l2cap_check_security(sk)) {
2922 if (bt_sk(sk)->defer_setup) {
2923 sk->sk_state = BT_CONNECT2;
2924 result = L2CAP_CR_PEND;
2925 status = L2CAP_CS_AUTHOR_PEND;
2926 parent->sk_data_ready(parent, 0);
2927 } else {
2928 sk->sk_state = BT_CONFIG;
2929 result = L2CAP_CR_SUCCESS;
2930 status = L2CAP_CS_NO_INFO;
2932 } else {
2933 sk->sk_state = BT_CONNECT2;
2934 result = L2CAP_CR_PEND;
2935 status = L2CAP_CS_AUTHEN_PEND;
2937 } else {
2938 sk->sk_state = BT_CONNECT2;
2939 result = L2CAP_CR_PEND;
2940 status = L2CAP_CS_NO_INFO;
2943 write_unlock_bh(&list->lock);
2945 response:
2946 bh_unlock_sock(parent);
2948 sendresp:
2949 rsp.scid = cpu_to_le16(scid);
2950 rsp.dcid = cpu_to_le16(dcid);
2951 rsp.result = cpu_to_le16(result);
2952 rsp.status = cpu_to_le16(status);
2953 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: start the feature-mask exchange
 * before configuration can proceed. */
2955 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2956 struct l2cap_info_req info;
2957 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2959 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2960 conn->info_ident = l2cap_get_ident(conn);
2962 mod_timer(&conn->info_timer, jiffies +
2963 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2965 l2cap_send_cmd(conn, conn->info_ident,
2966 L2CAP_INFO_REQ, sizeof(info), &info);
2969 return 0;
/* Handle the peer's Connection Response to a Connection Request we
 * sent.  Success moves the channel to BT_CONFIG and immediately fires
 * our first Configure Request; "pending" just flags the channel;
 * anything else tears it down.  Always returns 0.
 */
2972 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2974 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2975 u16 scid, dcid, result, status;
2976 struct sock *sk;
2977 u8 req[128];
2979 scid = __le16_to_cpu(rsp->scid);
2980 dcid = __le16_to_cpu(rsp->dcid);
2981 result = __le16_to_cpu(rsp->result);
2982 status = __le16_to_cpu(rsp->status);
2984 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response may not yet echo our source CID, so fall back
 * to matching by command ident.  NOTE(review): the lookup helpers
 * presumably return the socket bh-locked (bh_unlock_sock below) —
 * confirm against their definitions. */
2986 if (scid) {
2987 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2988 if (!sk)
2989 return 0;
2990 } else {
2991 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2992 if (!sk)
2993 return 0;
2996 switch (result) {
2997 case L2CAP_CR_SUCCESS:
2998 sk->sk_state = BT_CONFIG;
2999 l2cap_pi(sk)->ident = 0;
3000 l2cap_pi(sk)->dcid = dcid;
3001 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3002 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3004 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3005 l2cap_build_conf_req(sk, req), req);
3006 l2cap_pi(sk)->num_conf_req++;
3007 break;
3009 case L2CAP_CR_PEND:
3010 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3011 break;
/* Any other result code is a refusal: drop the channel. */
3013 default:
3014 l2cap_chan_del(sk, ECONNREFUSED);
3015 break;
3018 bh_unlock_sock(sk);
3019 return 0;
/* Handle an incoming Configure Request.  Option data may arrive split
 * over several requests (continuation flag); fragments are accumulated
 * in conf_req/conf_len until complete, then parsed and answered.  When
 * both config directions are done the channel goes BT_CONNECTED.
 * Returns 0, or -ENOENT when the target channel does not exist.
 */
3022 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3024 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3025 u16 dcid, flags;
3026 u8 rsp[64];
3027 struct sock *sk;
3028 int len;
3030 dcid = __le16_to_cpu(req->dcid);
3031 flags = __le16_to_cpu(req->flags);
3033 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3035 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3036 if (!sk)
3037 return -ENOENT;
/* Configuration is only legal in BT_CONFIG; reject with reason
 * 0x0002 (invalid CID) otherwise. */
3039 if (sk->sk_state != BT_CONFIG) {
3040 struct l2cap_cmd_rej rej;
3042 rej.reason = cpu_to_le16(0x0002);
3043 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3044 sizeof(rej), &rej);
3045 goto unlock;
3048 /* Reject if config buffer is too small. */
3049 len = cmd_len - sizeof(*req);
3050 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3051 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3052 l2cap_build_conf_rsp(sk, rsp,
3053 L2CAP_CONF_REJECT, flags), rsp);
3054 goto unlock;
3057 /* Store config. */
3058 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3059 l2cap_pi(sk)->conf_len += len;
3061 if (flags & 0x0001) {
3062 /* Incomplete config. Send empty response. */
3063 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3064 l2cap_build_conf_rsp(sk, rsp,
3065 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3066 goto unlock;
3069 /* Complete config. */
3070 len = l2cap_parse_conf_req(sk, rsp)
3071 if (len < 0) {
3072 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3073 goto unlock;
3076 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3077 l2cap_pi(sk)->num_conf_rsp++;
3079 /* Reset config buffer. */
3080 l2cap_pi(sk)->conf_len = 0;
3082 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3083 goto unlock;
/* Both directions configured: pick FCS mode and bring the channel
 * up (ERTM channels also get their state machine initialized). */
3085 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3086 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3087 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3088 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3090 sk->sk_state = BT_CONNECTED;
3092 l2cap_pi(sk)->next_tx_seq = 0;
3093 l2cap_pi(sk)->expected_tx_seq = 0;
3094 __skb_queue_head_init(TX_QUEUE(sk));
3095 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3096 l2cap_ertm_init(sk);
3098 l2cap_chan_ready(sk);
3099 goto unlock;
/* Peer configured us first: send our own Configure Request now. */
3102 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3103 u8 buf[64];
3104 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3105 l2cap_build_conf_req(sk, buf), buf);
3106 l2cap_pi(sk)->num_conf_req++;
3109 unlock:
3110 bh_unlock_sock(sk);
3111 return 0;
3114 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3116 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3117 u16 scid, flags, result;
3118 struct sock *sk;
3119 int len = cmd->len - sizeof(*rsp);
3121 scid = __le16_to_cpu(rsp->scid);
3122 flags = __le16_to_cpu(rsp->flags);
3123 result = __le16_to_cpu(rsp->result);
3125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3126 scid, flags, result);
3128 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3129 if (!sk)
3130 return 0;
3132 switch (result) {
3133 case L2CAP_CONF_SUCCESS:
3134 l2cap_conf_rfc_get(sk, rsp->data, len);
3135 break;
3137 case L2CAP_CONF_UNACCEPT:
3138 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3139 char req[64];
3141 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3142 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3143 goto done;
3146 /* throw out any old stored conf requests */
3147 result = L2CAP_CONF_SUCCESS;
3148 len = l2cap_parse_conf_rsp(sk, rsp->data,
3149 len, req, &result);
3150 if (len < 0) {
3151 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3152 goto done;
3155 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3156 L2CAP_CONF_REQ, len, req);
3157 l2cap_pi(sk)->num_conf_req++;
3158 if (result != L2CAP_CONF_SUCCESS)
3159 goto done;
3160 break;
3163 default:
3164 sk->sk_err = ECONNRESET;
3165 l2cap_sock_set_timer(sk, HZ * 5);
3166 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3167 goto done;
3170 if (flags & 0x01)
3171 goto done;
3173 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3175 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3176 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3177 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3178 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3180 sk->sk_state = BT_CONNECTED;
3181 l2cap_pi(sk)->next_tx_seq = 0;
3182 l2cap_pi(sk)->expected_tx_seq = 0;
3183 __skb_queue_head_init(TX_QUEUE(sk));
3184 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3185 l2cap_ertm_init(sk);
3187 l2cap_chan_ready(sk);
3190 done:
3191 bh_unlock_sock(sk);
3192 return 0;
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down and remove the channel.
 * Always returns 0 (unknown CIDs are silently ignored).
 */
3195 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3197 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3198 struct l2cap_disconn_rsp rsp;
3199 u16 dcid, scid;
3200 struct sock *sk;
3202 scid = __le16_to_cpu(req->scid);
3203 dcid = __le16_to_cpu(req->dcid);
3205 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid names OUR channel endpoint. */
3207 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3208 if (!sk)
3209 return 0;
3211 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3212 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3213 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3215 sk->sk_shutdown = SHUTDOWN_MASK;
3217 l2cap_chan_del(sk, ECONNRESET);
3218 bh_unlock_sock(sk);
/* Kill only takes effect once userspace has no reference left. */
3220 l2cap_sock_kill(sk);
3221 return 0;
3224 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3226 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3227 u16 dcid, scid;
3228 struct sock *sk;
3230 scid = __le16_to_cpu(rsp->scid);
3231 dcid = __le16_to_cpu(rsp->dcid);
3233 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3235 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3236 if (!sk)
3237 return 0;
3239 l2cap_chan_del(sk, 0);
3240 bh_unlock_sock(sk);
3242 l2cap_sock_kill(sk);
3243 return 0;
3246 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3248 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3249 u16 type;
3251 type = __le16_to_cpu(req->type);
3253 BT_DBG("type 0x%4.4x", type);
3255 if (type == L2CAP_IT_FEAT_MASK) {
3256 u8 buf[8];
3257 u32 feat_mask = l2cap_feat_mask;
3258 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3259 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3260 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3261 if (enable_ertm)
3262 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3263 | L2CAP_FEAT_FCS;
3264 put_unaligned_le32(feat_mask, rsp->data);
3265 l2cap_send_cmd(conn, cmd->ident,
3266 L2CAP_INFO_RSP, sizeof(buf), buf);
3267 } else if (type == L2CAP_IT_FIXED_CHAN) {
3268 u8 buf[12];
3269 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3270 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3271 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3272 memcpy(buf + 4, l2cap_fixed_chan, 8);
3273 l2cap_send_cmd(conn, cmd->ident,
3274 L2CAP_INFO_RSP, sizeof(buf), buf);
3275 } else {
3276 struct l2cap_info_rsp rsp;
3277 rsp.type = cpu_to_le16(type);
3278 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3279 l2cap_send_cmd(conn, cmd->ident,
3280 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3283 return 0;
/* Handle the peer's Information Response.  After the feature mask is
 * known we either chase the fixed-channel map (if advertised) or mark
 * the exchange done and start any channels that were waiting on it.
 * Always returns 0.
 * NOTE(review): rsp->result is read but never checked for success —
 * a failure response is treated like a success; confirm intent.
 */
3288 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3289 u16 type, result;
3291 type = __le16_to_cpu(rsp->type);
3292 result = __le16_to_cpu(rsp->result);
3294 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3296 del_timer(&conn->info_timer);
3298 if (type == L2CAP_IT_FEAT_MASK) {
3299 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: issue a follow-up request. */
3301 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3302 struct l2cap_info_req req;
3303 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3305 conn->info_ident = l2cap_get_ident(conn);
3307 l2cap_send_cmd(conn, conn->info_ident,
3308 L2CAP_INFO_REQ, sizeof(req), &req);
3309 } else {
3310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3311 conn->info_ident = 0;
3313 l2cap_conn_start(conn);
3315 } else if (type == L2CAP_IT_FIXED_CHAN) {
3316 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3317 conn->info_ident = 0;
3319 l2cap_conn_start(conn);
3322 return 0;
/* Dispatch every command in a signalling-channel PDU to its handler.
 * A handler error is answered with a Command Reject; a truncated or
 * ident-less command aborts the whole PDU.  Consumes @skb.
 */
3325 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3327 u8 *data = skb->data;
3328 int len = skb->len;
3329 struct l2cap_cmd_hdr cmd;
3330 int err = 0;
/* Give raw sockets a copy of the signalling traffic first. */
3332 l2cap_raw_recv(conn, skb);
3334 while (len >= L2CAP_CMD_HDR_SIZE) {
3335 u16 cmd_len;
3336 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3337 data += L2CAP_CMD_HDR_SIZE;
3338 len -= L2CAP_CMD_HDR_SIZE;
3340 cmd_len = le16_to_cpu(cmd.len);
3342 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3344 if (cmd_len > len || !cmd.ident) {
3345 BT_DBG("corrupted command");
3346 break;
3349 switch (cmd.code) {
3350 case L2CAP_COMMAND_REJ:
3351 l2cap_command_rej(conn, &cmd, data);
3352 break;
3354 case L2CAP_CONN_REQ:
3355 err = l2cap_connect_req(conn, &cmd, data);
3356 break;
3358 case L2CAP_CONN_RSP:
3359 err = l2cap_connect_rsp(conn, &cmd, data);
3360 break;
3362 case L2CAP_CONF_REQ:
3363 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3364 break;
3366 case L2CAP_CONF_RSP:
3367 err = l2cap_config_rsp(conn, &cmd, data);
3368 break;
3370 case L2CAP_DISCONN_REQ:
3371 err = l2cap_disconnect_req(conn, &cmd, data);
3372 break;
3374 case L2CAP_DISCONN_RSP:
3375 err = l2cap_disconnect_rsp(conn, &cmd, data);
3376 break;
/* Echo requests are answered in place with the same payload. */
3378 case L2CAP_ECHO_REQ:
3379 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3380 break;
3382 case L2CAP_ECHO_RSP:
3383 break;
3385 case L2CAP_INFO_REQ:
3386 err = l2cap_information_req(conn, &cmd, data);
3387 break;
3389 case L2CAP_INFO_RSP:
3390 err = l2cap_information_rsp(conn, &cmd, data);
3391 break;
3393 default:
3394 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3395 err = -EINVAL;
3396 break;
3399 if (err) {
3400 struct l2cap_cmd_rej rej;
3401 BT_DBG("error %d", err);
3403 /* FIXME: Map err to a valid reason */
3404 rej.reason = cpu_to_le16(0);
3405 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3408 data += cmd_len;
3409 len -= cmd_len;
3412 kfree_skb(skb);
3415 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3417 u16 our_fcs, rcv_fcs;
3418 int hdr_size = L2CAP_HDR_SIZE + 2;
3420 if (pi->fcs == L2CAP_FCS_CRC16) {
3421 skb_trim(skb, skb->len - 2);
3422 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3423 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3425 if (our_fcs != rcv_fcs)
3426 return -EINVAL;
3428 return 0;
/* Answer a poll (P-bit) from the peer: send RNR when we are locally
 * busy, retransmit/send pending I-frames otherwise, and fall back to
 * an RR if nothing at all was transmitted, so the peer always gets a
 * frame acknowledging buffer_seq.
 */
3431 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3433 struct l2cap_pinfo *pi = l2cap_pi(sk);
3434 u16 control = 0;
3436 pi->frames_sent = 0;
3438 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3440 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3441 control |= L2CAP_SUPER_RCV_NOT_READY;
3442 l2cap_send_sframe(pi, control);
/* Remember we told the peer we are busy; cleared on exit from
 * local busy in l2cap_try_push_rx_skb(). */
3443 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3446 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3447 l2cap_retransmit_frames(sk);
3449 l2cap_ertm_send(sk);
/* Nothing was sent and we are not busy: acknowledge with an RR. */
3451 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3452 pi->frames_sent == 0) {
3453 control |= L2CAP_SUPER_RCV_READY;
3454 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into SREJ_QUEUE keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo-64 arithmetic).
 * Returns 0 on insertion, -EINVAL if the tx_seq is already queued
 * (duplicate frame).
 */
3458 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3460 struct sk_buff *next_skb;
3461 struct l2cap_pinfo *pi = l2cap_pi(sk);
3462 int tx_seq_offset, next_tx_seq_offset;
3464 bt_cb(skb)->tx_seq = tx_seq;
3465 bt_cb(skb)->sar = sar;
3467 next_skb = skb_peek(SREJ_QUEUE(sk));
3468 if (!next_skb) {
3469 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3470 return 0;
/* Distance of the new frame from the receive window base. */
3473 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3474 if (tx_seq_offset < 0)
3475 tx_seq_offset += 64;
3477 do {
3478 if (bt_cb(next_skb)->tx_seq == tx_seq)
3479 return -EINVAL;
3481 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3482 pi->buffer_seq) % 64;
3483 if (next_tx_seq_offset < 0)
3484 next_tx_seq_offset += 64;
/* First queued frame farther from the base than ours: insert
 * in front of it to keep the queue ordered. */
3486 if (next_tx_seq_offset > tx_seq_offset) {
3487 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3488 return 0;
3491 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3492 break;
3494 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3496 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3498 return 0;
/* Reassemble ERTM SAR fragments into an SDU and queue it on the
 * socket.  Consumes @skb on success and on protocol errors; returns
 * a negative error (leaving state retryable via SAR_RETRY) when the
 * socket cannot accept the SDU right now — that drives the local-busy
 * machinery in l2cap_push_rx_skb().
 */
3501 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3503 struct l2cap_pinfo *pi = l2cap_pi(sk);
3504 struct sk_buff *_skb;
3505 int err;
3507 switch (control & L2CAP_CTRL_SAR) {
3508 case L2CAP_SDU_UNSEGMENTED:
3509 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3510 goto drop;
3512 err = sock_queue_rcv_skb(sk, skb);
3513 if (!err)
3514 return err;
3516 break;
3518 case L2CAP_SDU_START:
3519 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3520 goto drop;
3522 pi->sdu_len = get_unaligned_le16(skb->data);
3524 if (pi->sdu_len > pi->imtu)
3525 goto disconnect;
3527 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3528 if (!pi->sdu)
3529 return -ENOMEM;
3531 /* pull sdu_len bytes only after alloc, because of Local Busy
3532 * condition we have to be sure that this will be executed
3533 * only once, i.e., when alloc does not fail */
3534 skb_pull(skb, 2);
3536 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3538 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3539 pi->partial_sdu_len = skb->len;
3540 break;
3542 case L2CAP_SDU_CONTINUE:
3543 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3544 goto disconnect;
3546 if (!pi->sdu)
3547 goto disconnect;
/* Bounds-check before copying into the sdu_len-sized buffer. */
3549 pi->partial_sdu_len += skb->len;
3550 if (pi->partial_sdu_len > pi->sdu_len)
3551 goto drop;
3553 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3555 break;
3557 case L2CAP_SDU_END:
3558 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3559 goto disconnect;
3561 if (!pi->sdu)
3562 goto disconnect;
/* On a retry (previous queueing failed) the payload was already
 * appended; skip length accounting and the copy. */
3564 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3565 pi->partial_sdu_len += skb->len;
3567 if (pi->partial_sdu_len > pi->imtu)
3568 goto drop;
3570 if (pi->partial_sdu_len != pi->sdu_len)
3571 goto drop;
3573 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3576 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3577 if (!_skb) {
3578 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3579 return -ENOMEM;
3582 err = sock_queue_rcv_skb(sk, _skb);
3583 if (err < 0) {
3584 kfree_skb(_skb);
3585 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3586 return err;
3589 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3590 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3592 kfree_skb(pi->sdu);
3593 break;
3596 kfree_skb(skb);
3597 return 0;
/* NOTE(review): drop deliberately falls through into disconnect —
 * any reassembly violation tears the ERTM channel down. */
3599 drop:
3600 kfree_skb(pi->sdu);
3601 pi->sdu = NULL;
3603 disconnect:
3604 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3605 kfree_skb(skb);
3606 return 0;
/* Try to drain BUSY_QUEUE into the socket.  Returns -EBUSY (leaving
 * the failed skb requeued at the head) while the socket still cannot
 * take data; on success clears the local-busy state and, if an RNR
 * was sent earlier, polls the peer with an RR+P frame to restart its
 * transmissions.
 */
3609 static int l2cap_try_push_rx_skb(struct sock *sk)
3611 struct l2cap_pinfo *pi = l2cap_pi(sk);
3612 struct sk_buff *skb;
3613 u16 control;
3614 int err;
3616 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3617 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3618 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3619 if (err < 0) {
3620 skb_queue_head(BUSY_QUEUE(sk), skb);
3621 return -EBUSY;
/* Frame delivered: advance the receive window. */
3624 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3627 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3628 goto done;
/* We previously advertised busy (RNR): poll the peer so it
 * resumes, and wait for the F-bit under the monitor timer. */
3630 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3631 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3632 l2cap_send_sframe(pi, control);
3633 l2cap_pi(sk)->retry_count = 1;
3635 del_timer(&pi->retrans_timer);
3636 __mod_monitor_timer();
3638 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3640 done:
3641 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3642 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3644 BT_DBG("sk %p, Exit local busy", sk);
3646 return 0;
/* Workqueue handler for the local-busy condition: repeatedly sleeps
 * and retries pushing the queued receive data into the socket until
 * it drains, the socket errors, a signal arrives, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted — in which case the channel
 * is disconnected.  Runs in process context, so it may sleep.
 */
3649 static void l2cap_busy_work(struct work_struct *work)
3651 DECLARE_WAITQUEUE(wait, current);
3652 struct l2cap_pinfo *pi =
3653 container_of(work, struct l2cap_pinfo, busy_work);
3654 struct sock *sk = (struct sock *)pi;
3655 int n_tries = 0, timeo = HZ/5, err;
3656 struct sk_buff *skb;
3658 lock_sock(sk);
3660 add_wait_queue(sk_sleep(sk), &wait);
3661 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3662 set_current_state(TASK_INTERRUPTIBLE);
3664 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3665 err = -EBUSY;
3666 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3667 break;
3670 if (!timeo)
3671 timeo = HZ/5;
3673 if (signal_pending(current)) {
3674 err = sock_intr_errno(timeo);
3675 break;
/* Release the socket while sleeping so the receive path can
 * make progress (e.g. userspace reading and freeing space). */
3678 release_sock(sk);
3679 timeo = schedule_timeout(timeo);
3680 lock_sock(sk);
3682 err = sock_error(sk);
3683 if (err)
3684 break;
3686 if (l2cap_try_push_rx_skb(sk) == 0)
3687 break;
3690 set_current_state(TASK_RUNNING);
3691 remove_wait_queue(sk_sleep(sk), &wait);
3693 release_sock(sk);
/* Deliver a received in-sequence I-frame to the socket, entering the
 * local-busy state when the socket cannot take it: the frame is parked
 * on BUSY_QUEUE, an RNR is sent to the peer, and the busy workqueue
 * takes over retrying.  Returns the reassembly result (negative while
 * busy).
 */
3696 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3698 struct l2cap_pinfo *pi = l2cap_pi(sk);
3699 int sctrl, err;
/* Already busy: just append and let the drain path handle it. */
3701 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3702 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3703 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3704 return l2cap_try_push_rx_skb(sk);
3709 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3710 if (err >= 0) {
3711 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3712 return err;
3715 /* Busy Condition */
3716 BT_DBG("sk %p, Enter local busy", sk);
3718 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3719 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3720 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending until we recover. */
3722 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3723 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3724 l2cap_send_sframe(pi, sctrl);
3726 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3728 del_timer(&pi->ack_timer);
3730 queue_work(_busy_wq, &pi->busy_work);
3732 return err;
3735 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3737 struct l2cap_pinfo *pi = l2cap_pi(sk);
3738 struct sk_buff *_skb;
3739 int err = -EINVAL;
3742 * TODO: We have to notify the userland if some data is lost with the
3743 * Streaming Mode.
3746 switch (control & L2CAP_CTRL_SAR) {
3747 case L2CAP_SDU_UNSEGMENTED:
3748 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3749 kfree_skb(pi->sdu);
3750 break;
3753 err = sock_queue_rcv_skb(sk, skb);
3754 if (!err)
3755 return 0;
3757 break;
3759 case L2CAP_SDU_START:
3760 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3761 kfree_skb(pi->sdu);
3762 break;
3765 pi->sdu_len = get_unaligned_le16(skb->data);
3766 skb_pull(skb, 2);
3768 if (pi->sdu_len > pi->imtu) {
3769 err = -EMSGSIZE;
3770 break;
3773 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3774 if (!pi->sdu) {
3775 err = -ENOMEM;
3776 break;
3779 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3781 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3782 pi->partial_sdu_len = skb->len;
3783 err = 0;
3784 break;
3786 case L2CAP_SDU_CONTINUE:
3787 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3788 break;
3790 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3792 pi->partial_sdu_len += skb->len;
3793 if (pi->partial_sdu_len > pi->sdu_len)
3794 kfree_skb(pi->sdu);
3795 else
3796 err = 0;
3798 break;
3800 case L2CAP_SDU_END:
3801 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3802 break;
3804 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3806 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3807 pi->partial_sdu_len += skb->len;
3809 if (pi->partial_sdu_len > pi->imtu)
3810 goto drop;
3812 if (pi->partial_sdu_len == pi->sdu_len) {
3813 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3814 err = sock_queue_rcv_skb(sk, _skb);
3815 if (err < 0)
3816 kfree_skb(_skb);
3818 err = 0;
3820 drop:
3821 kfree_skb(pi->sdu);
3822 break;
3825 kfree_skb(skb);
3826 return err;
3829 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3831 struct sk_buff *skb;
3832 u16 control;
3834 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3835 if (bt_cb(skb)->tx_seq != tx_seq)
3836 break;
3838 skb = skb_dequeue(SREJ_QUEUE(sk));
3839 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3840 l2cap_ertm_reassembly_sdu(sk, skb, control);
3841 l2cap_pi(sk)->buffer_seq_srej =
3842 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3843 tx_seq = (tx_seq + 1) % 64;
/* Walk SREJ_LIST up to the entry for @tx_seq (the frame that just
 * arrived): that entry is satisfied and removed; every entry before it
 * gets its SREJ re-sent and is rotated to the tail so the list keeps
 * tracking the still-missing frames in order.
 */
3847 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3849 struct l2cap_pinfo *pi = l2cap_pi(sk);
3850 struct srej_list *l, *tmp;
3851 u16 control;
/* _safe iteration: entries are unlinked inside the loop. */
3853 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3854 if (l->tx_seq == tx_seq) {
3855 list_del(&l->list);
3856 kfree(l);
3857 return;
3859 control = L2CAP_SUPER_SELECT_REJECT;
3860 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3861 l2cap_send_sframe(pi, control);
/* Move the still-missing entry to the tail (list rotation). */
3862 list_del(&l->list);
3863 list_add_tail(&l->list, SREJ_LIST(sk));
3867 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3869 struct l2cap_pinfo *pi = l2cap_pi(sk);
3870 struct srej_list *new;
3871 u16 control;
3873 while (tx_seq != pi->expected_tx_seq) {
3874 control = L2CAP_SUPER_SELECT_REJECT;
3875 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3876 l2cap_send_sframe(pi, control);
3878 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3879 new->tx_seq = pi->expected_tx_seq;
3880 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3881 list_add_tail(&new->list, SREJ_LIST(sk));
3883 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3886 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3888 struct l2cap_pinfo *pi = l2cap_pi(sk);
3889 u8 tx_seq = __get_txseq(rx_control);
3890 u8 req_seq = __get_reqseq(rx_control);
3891 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3892 int tx_seq_offset, expected_tx_seq_offset;
3893 int num_to_ack = (pi->tx_win/6) + 1;
3894 int err = 0;
3896 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3897 rx_control);
3899 if (L2CAP_CTRL_FINAL & rx_control &&
3900 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3901 del_timer(&pi->monitor_timer);
3902 if (pi->unacked_frames > 0)
3903 __mod_retrans_timer();
3904 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3907 pi->expected_ack_seq = req_seq;
3908 l2cap_drop_acked_frames(sk);
3910 if (tx_seq == pi->expected_tx_seq)
3911 goto expected;
3913 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3914 if (tx_seq_offset < 0)
3915 tx_seq_offset += 64;
3917 /* invalid tx_seq */
3918 if (tx_seq_offset >= pi->tx_win) {
3919 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3920 goto drop;
3923 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3924 goto drop;
3926 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3927 struct srej_list *first;
3929 first = list_first_entry(SREJ_LIST(sk),
3930 struct srej_list, list);
3931 if (tx_seq == first->tx_seq) {
3932 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3933 l2cap_check_srej_gap(sk, tx_seq);
3935 list_del(&first->list);
3936 kfree(first);
3938 if (list_empty(SREJ_LIST(sk))) {
3939 pi->buffer_seq = pi->buffer_seq_srej;
3940 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3941 l2cap_send_ack(pi);
3942 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3944 } else {
3945 struct srej_list *l;
3947 /* duplicated tx_seq */
3948 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3949 goto drop;
3951 list_for_each_entry(l, SREJ_LIST(sk), list) {
3952 if (l->tx_seq == tx_seq) {
3953 l2cap_resend_srejframe(sk, tx_seq);
3954 return 0;
3957 l2cap_send_srejframe(sk, tx_seq);
3959 } else {
3960 expected_tx_seq_offset =
3961 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3962 if (expected_tx_seq_offset < 0)
3963 expected_tx_seq_offset += 64;
3965 /* duplicated tx_seq */
3966 if (tx_seq_offset < expected_tx_seq_offset)
3967 goto drop;
3969 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3971 BT_DBG("sk %p, Enter SREJ", sk);
3973 INIT_LIST_HEAD(SREJ_LIST(sk));
3974 pi->buffer_seq_srej = pi->buffer_seq;
3976 __skb_queue_head_init(SREJ_QUEUE(sk));
3977 __skb_queue_head_init(BUSY_QUEUE(sk));
3978 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3980 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3982 l2cap_send_srejframe(sk, tx_seq);
3984 del_timer(&pi->ack_timer);
3986 return 0;
3988 expected:
3989 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3991 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3992 bt_cb(skb)->tx_seq = tx_seq;
3993 bt_cb(skb)->sar = sar;
3994 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3995 return 0;
3998 err = l2cap_push_rx_skb(sk, skb, rx_control);
3999 if (err < 0)
4000 return 0;
4002 if (rx_control & L2CAP_CTRL_FINAL) {
4003 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4004 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4005 else
4006 l2cap_retransmit_frames(sk);
4009 __mod_ack_timer();
4011 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4012 if (pi->num_acked == num_to_ack - 1)
4013 l2cap_send_ack(pi);
4015 return 0;
4017 drop:
4018 kfree_skb(skb);
4019 return 0;
/* Handle a received RR (Receiver Ready) S-frame in ERTM mode.
 *
 * An RR acknowledges I-frames up to req_seq.  The P/F bits steer the
 * retransmission state machine: a poll demands an F-bit reply, a final
 * bit answers one of our earlier polls.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	/* req_seq acknowledges everything sent before it; drop those
	 * frames from the retransmission queue. */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polled us: we must answer with the F-bit set. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			/* Peer was busy and has now recovered; restart the
			 * retransmission timer if frames are still pending. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answer to our poll. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* If a REJ was already acted upon, this final RR just
		 * clears that state; otherwise retransmit the window. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain acknowledgement. */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			/* Still recovering via SREJ: only acknowledge. */
			l2cap_send_ack(pi);
		} else {
			/* Window may have opened; push queued I-frames. */
			l2cap_ertm_send(sk);
		}
	}
}
/* Handle a received REJ (Reject) S-frame in ERTM mode.
 *
 * A REJ acknowledges frames up to req_seq and asks for a go-back-N
 * retransmission starting at that sequence number.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* The REJ's req_seq still acknowledges earlier frames. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit reply: skip retransmission if this REJ was
		 * already acted upon while waiting for the F-bit. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we acted on this REJ so the eventual F-bit
		 * answer does not trigger a second retransmission. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a received SREJ (Selective Reject) S-frame in ERTM mode.
 *
 * An SREJ requests retransmission of the single I-frame with sequence
 * number req_seq.  P/F bit handling mirrors the REJ logic, using
 * srej_save_reqseq to suppress duplicate retransmissions.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to req_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		/* Our reply must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		/* Then continue normal transmission. */
		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			/* Record which frame we resent so the F-bit answer
			 * can be matched against it later. */
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answer: retransmit only if we did not already act
		 * on an SREJ for this very sequence number. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a received RNR (Receiver Not Ready) S-frame in ERTM mode.
 *
 * The peer signals local busy: mark it busy, take the acknowledgement
 * carried in req_seq, and stop (re)transmitting until it recovers.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	/* A poll demands that our answer carries the F-bit. */
	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Not in SREJ recovery: stop the retransmission timer while
		 * the peer is busy, answering the poll if one was set. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep asking for the missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4155 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4157 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4159 if (L2CAP_CTRL_FINAL & rx_control &&
4160 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4161 del_timer(&l2cap_pi(sk)->monitor_timer);
4162 if (l2cap_pi(sk)->unacked_frames > 0)
4163 __mod_retrans_timer();
4164 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4167 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4168 case L2CAP_SUPER_RCV_READY:
4169 l2cap_data_channel_rrframe(sk, rx_control);
4170 break;
4172 case L2CAP_SUPER_REJECT:
4173 l2cap_data_channel_rejframe(sk, rx_control);
4174 break;
4176 case L2CAP_SUPER_SELECT_REJECT:
4177 l2cap_data_channel_srejframe(sk, rx_control);
4178 break;
4180 case L2CAP_SUPER_RCV_NOT_READY:
4181 l2cap_data_channel_rnrframe(sk, rx_control);
4182 break;
4185 kfree_skb(skb);
4186 return 0;
/* Validate and dispatch one ERTM PDU (I-frame or S-frame).
 *
 * Strips the 16-bit control field, verifies the FCS, checks the payload
 * length against the negotiated MPS and checks that the acknowledgement
 * number (req_seq) lies inside the send window.  Consumes the skb on
 * every path; always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A segmented SDU's first I-frame carries a 2-byte SDU length. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* The trailing CRC16 is not payload. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* Offsets are computed modulo the 64-value sequence space so the
	 * comparison works across wrap-around. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* Negative len means the frame was too short to hold the
		 * headers it claimed; treat as protocol error. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Deliver an L2CAP data frame to the channel identified by its CID.
 *
 * Looks up the (locked) socket for the channel and hands the skb off
 * according to the channel mode: plain queueing for basic mode, the
 * ERTM state machine (or the socket backlog if the socket is owned by
 * a user context), or streaming-mode reassembly.  The skb is consumed
 * on every path; the socket lock taken by the lookup is released in
 * "done".  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* If a user context owns the socket, defer to the
		 * backlog so the ERTM state machine runs serialized. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SAR-start frames carry a 2-byte SDU length header. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Oversized, malformed or supervisory frames are simply
		 * dropped in streaming mode. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode has no retransmission: just resync the
		 * expected sequence number past any gap. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4347 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4349 struct sock *sk;
4351 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4352 if (!sk)
4353 goto drop;
4355 BT_DBG("sk %p, len %d", sk, skb->len);
4357 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4358 goto drop;
4360 if (l2cap_pi(sk)->imtu < skb->len)
4361 goto drop;
4363 if (!sock_queue_rcv_skb(sk, skb))
4364 goto done;
4366 drop:
4367 kfree_skb(skb);
4369 done:
4370 if (sk)
4371 bh_unlock_sock(sk);
4372 return 0;
4375 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4377 struct l2cap_hdr *lh = (void *) skb->data;
4378 u16 cid, len;
4379 __le16 psm;
4381 skb_pull(skb, L2CAP_HDR_SIZE);
4382 cid = __le16_to_cpu(lh->cid);
4383 len = __le16_to_cpu(lh->len);
4385 if (len != skb->len) {
4386 kfree_skb(skb);
4387 return;
4390 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4392 switch (cid) {
4393 case L2CAP_CID_SIGNALING:
4394 l2cap_sig_channel(conn, skb);
4395 break;
4397 case L2CAP_CID_CONN_LESS:
4398 psm = get_unaligned_le16(skb->data);
4399 skb_pull(skb, 2);
4400 l2cap_conless_channel(conn, psm, skb);
4401 break;
4403 default:
4404 l2cap_data_channel(conn, cid, skb);
4405 break;
4409 /* ---- L2CAP interface with lower layer (HCI) ---- */
4411 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4413 int exact = 0, lm1 = 0, lm2 = 0;
4414 register struct sock *sk;
4415 struct hlist_node *node;
4417 if (type != ACL_LINK)
4418 return 0;
4420 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4422 /* Find listening sockets and check their link_mode */
4423 read_lock(&l2cap_sk_list.lock);
4424 sk_for_each(sk, node, &l2cap_sk_list.head) {
4425 if (sk->sk_state != BT_LISTEN)
4426 continue;
4428 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4429 lm1 |= HCI_LM_ACCEPT;
4430 if (l2cap_pi(sk)->role_switch)
4431 lm1 |= HCI_LM_MASTER;
4432 exact++;
4433 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4434 lm2 |= HCI_LM_ACCEPT;
4435 if (l2cap_pi(sk)->role_switch)
4436 lm2 |= HCI_LM_MASTER;
4439 read_unlock(&l2cap_sk_list.lock);
4441 return exact ? lm1 : lm2;
4444 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4446 struct l2cap_conn *conn;
4448 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4450 if (hcon->type != ACL_LINK)
4451 return 0;
4453 if (!status) {
4454 conn = l2cap_conn_add(hcon, status);
4455 if (conn)
4456 l2cap_conn_ready(conn);
4457 } else
4458 l2cap_conn_del(hcon, bt_err(status));
4460 return 0;
4463 static int l2cap_disconn_ind(struct hci_conn *hcon)
4465 struct l2cap_conn *conn = hcon->l2cap_data;
4467 BT_DBG("hcon %p", hcon);
4469 if (hcon->type != ACL_LINK || !conn)
4470 return 0x13;
4472 return conn->disc_reason;
4475 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4477 BT_DBG("hcon %p reason %d", hcon, reason);
4479 if (hcon->type != ACL_LINK)
4480 return 0;
4482 l2cap_conn_del(hcon, bt_err(reason));
4484 return 0;
4487 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4489 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4490 return;
4492 if (encrypt == 0x00) {
4493 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4494 l2cap_sock_clear_timer(sk);
4495 l2cap_sock_set_timer(sk, HZ * 5);
4496 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4497 __l2cap_sock_close(sk, ECONNREFUSED);
4498 } else {
4499 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4500 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption finished for this link.
 *
 * Walks every channel on the connection (under the channel-list read
 * lock, each socket bh-locked in turn) and advances its state: already
 * connected channels re-check encryption requirements, channels in
 * BT_CONNECT send their deferred Connection Request, and channels in
 * BT_CONNECT2 answer the peer's pending request with success or a
 * security block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A channel whose connect is already in flight is left
		 * alone; its response path will handle the result. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the Connection
				 * Request we were holding back. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: schedule a quick
				 * timeout to tear the channel down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: one ACL data packet arrived for this link.
 *
 * Reassembles L2CAP frames that span multiple ACL packets.  A packet
 * flagged ACL_START carries the L2CAP header; if the whole frame fits,
 * it is dispatched immediately, otherwise a reassembly buffer of the
 * full frame length is allocated and conn->rx_len tracks the bytes
 * still missing.  Continuation packets are appended until rx_len hits
 * zero.  Any inconsistency marks the connection unreliable and drops
 * the fragment.  The incoming skb is always consumed; returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while reassembly is in progress means the
		 * previous frame was truncated: abandon it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
		/* Fall through to drop: the fragment was copied, so the
		 * original skb can be freed. */
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4666 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4668 struct sock *sk;
4669 struct hlist_node *node;
4671 read_lock_bh(&l2cap_sk_list.lock);
4673 sk_for_each(sk, node, &l2cap_sk_list.head) {
4674 struct l2cap_pinfo *pi = l2cap_pi(sk);
4676 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4677 batostr(&bt_sk(sk)->src),
4678 batostr(&bt_sk(sk)->dst),
4679 sk->sk_state, __le16_to_cpu(pi->psm),
4680 pi->scid, pi->dcid,
4681 pi->imtu, pi->omtu, pi->sec_level);
4684 read_unlock_bh(&l2cap_sk_list.lock);
4686 return 0;
/* debugfs open: stateless single-shot seq_file backed by
 * l2cap_debugfs_show(). */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the L2CAP debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit(). */
static struct dentry *l2cap_debugfs;
/* Socket-layer operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP
 * sockets; generic bt_sock_* helpers cover poll and ioctl. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH, ...,
 * BTPROTO_L2CAP) routes to l2cap_sock_create(). */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* Hooks registered with the HCI core: connection lifecycle,
 * security results and inbound ACL data delivery. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4740 static int __init l2cap_init(void)
4742 int err;
4744 err = proto_register(&l2cap_proto, 0);
4745 if (err < 0)
4746 return err;
4748 _busy_wq = create_singlethread_workqueue("l2cap");
4749 if (!_busy_wq)
4750 goto error;
4752 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4753 if (err < 0) {
4754 BT_ERR("L2CAP socket registration failed");
4755 goto error;
4758 err = hci_register_proto(&l2cap_hci_proto);
4759 if (err < 0) {
4760 BT_ERR("L2CAP protocol registration failed");
4761 bt_sock_unregister(BTPROTO_L2CAP);
4762 goto error;
4765 if (bt_debugfs) {
4766 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4767 bt_debugfs, NULL, &l2cap_debugfs_fops);
4768 if (!l2cap_debugfs)
4769 BT_ERR("Failed to create L2CAP debug file");
4772 BT_INFO("L2CAP ver %s", VERSION);
4773 BT_INFO("L2CAP socket layer initialized");
4775 return 0;
4777 error:
4778 proto_unregister(&l2cap_proto);
4779 return err;
/* Module exit: undo l2cap_init() in reverse order. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending deferred work before tearing the queue down so
	 * nothing runs after the module is gone. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime switch for ERTM support; the variable itself is defined
 * earlier in the file (outside this excerpt). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");