Bluetooth: Fix ACL MTU issue
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob99cf1772b481896fa945741985df54bb0856aacc
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
77 /* ---- L2CAP timers ---- */
/* Socket timer expired: close the socket with an error code that
 * reflects how far the connection had progressed. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Reap the socket if it is now a zapped orphan; drop the timer's ref. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
123 return s;
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
133 return s;
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
140 struct sock *s;
141 read_lock(&l->lock);
142 s = __l2cap_get_chan_by_scid(l, cid);
143 if (s)
144 bh_lock_sock(s);
145 read_unlock(&l->lock);
146 return s;
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
156 return s;
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 struct sock *s;
162 read_lock(&l->lock);
163 s = __l2cap_get_chan_by_ident(l, ident);
164 if (s)
165 bh_lock_sock(s);
166 read_unlock(&l->lock);
167 return s;
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
179 return 0;
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 sock_hold(sk);
186 if (l->head)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
191 l->head = sk;
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
199 if (sk == l->head)
200 l->head = next;
202 if (next)
203 l2cap_pi(next)->prev_c = prev;
204 if (prev)
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
208 __sock_put(sk);
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
230 } else {
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
239 if (parent)
240 bt_accept_enqueue(parent, sk);
243 /* Delete channel.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
254 if (conn) {
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
264 if (err)
265 sk->sk_err = err;
267 if (parent) {
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
270 } else
271 sk->sk_state_change(sk);
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 __u8 auth_type;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
283 else
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
288 } else {
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
292 break;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
295 break;
296 default:
297 auth_type = HCI_AT_NO_BONDING;
298 break;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
303 auth_type);
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
308 u8 id;
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
319 conn->tx_ident = 1;
321 id = conn->tx_ident;
323 spin_unlock_bh(&conn->lock);
325 return id;
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
334 if (!skb)
335 return -ENOMEM;
337 return hci_send_acl(conn->hcon, skb, 0);
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
342 struct sk_buff *skb;
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
348 hlen += 2;
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 if (!skb)
357 return -ENOMEM;
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
364 if (pi->fcs == L2CAP_FCS_CRC16) {
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
369 return hci_send_acl(pi->conn->hcon, skb, 0);
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
376 else
377 control |= L2CAP_SUPER_RCV_READY;
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
381 return l2cap_send_sframe(pi, control);
384 static void l2cap_do_start(struct sock *sk)
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
390 return;
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
402 } else {
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
417 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
419 struct l2cap_disconn_req req;
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
427 /* ---- L2CAP connections ---- */
428 static void l2cap_conn_start(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
431 struct sock *sk;
433 BT_DBG("conn %p", conn);
435 read_lock(&l->lock);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 bh_lock_sock(sk);
440 if (sk->sk_type != SOCK_SEQPACKET) {
441 bh_unlock_sock(sk);
442 continue;
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
461 if (l2cap_check_security(sk)) {
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
466 parent->sk_data_ready(parent, 0);
468 } else {
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
473 } else {
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
482 bh_unlock_sock(sk);
485 read_unlock(&l->lock);
488 static void l2cap_conn_ready(struct l2cap_conn *conn)
490 struct l2cap_chan_list *l = &conn->chan_list;
491 struct sock *sk;
493 BT_DBG("conn %p", conn);
495 read_lock(&l->lock);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
498 bh_lock_sock(sk);
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
505 l2cap_do_start(sk);
507 bh_unlock_sock(sk);
510 read_unlock(&l->lock);
513 /* Notify sockets that we cannot guaranty reliability anymore */
514 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
516 struct l2cap_chan_list *l = &conn->chan_list;
517 struct sock *sk;
519 BT_DBG("conn %p", conn);
521 read_lock(&l->lock);
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
525 sk->sk_err = err;
528 read_unlock(&l->lock);
531 static void l2cap_info_timeout(unsigned long arg)
533 struct l2cap_conn *conn = (void *) arg;
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
538 l2cap_conn_start(conn);
541 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
543 struct l2cap_conn *conn = hcon->l2cap_data;
545 if (conn || status)
546 return conn;
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
549 if (!conn)
550 return NULL;
552 hcon->l2cap_data = conn;
553 conn->hcon = hcon;
555 BT_DBG("hcon %p conn %p", hcon, conn);
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
561 conn->feat_mask = 0;
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
569 conn->disc_reason = 0x13;
571 return conn;
574 static void l2cap_conn_del(struct hci_conn *hcon, int err)
576 struct l2cap_conn *conn = hcon->l2cap_data;
577 struct sock *sk;
579 if (!conn)
580 return;
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
584 kfree_skb(conn->rx_skb);
586 /* Kill channels */
587 while ((sk = conn->chan_list.head)) {
588 bh_lock_sock(sk);
589 l2cap_chan_del(sk, err);
590 bh_unlock_sock(sk);
591 l2cap_sock_kill(sk);
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
597 hcon->l2cap_data = NULL;
598 kfree(conn);
601 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
609 /* ---- Socket interface ---- */
610 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
612 struct sock *sk;
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
616 goto found;
617 sk = NULL;
618 found:
619 return sk;
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
625 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
632 continue;
634 if (l2cap_pi(sk)->psm == psm) {
635 /* Exact match. */
636 if (!bacmp(&bt_sk(sk)->src, src))
637 break;
639 /* Closest match */
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
641 sk1 = sk;
644 return node ? sk : sk1;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
649 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *s;
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
654 if (s)
655 bh_lock_sock(s);
656 read_unlock(&l2cap_sk_list.lock);
657 return s;
660 static void l2cap_sock_destruct(struct sock *sk)
662 BT_DBG("sk %p", sk);
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
668 static void l2cap_sock_cleanup_listen(struct sock *parent)
670 struct sock *sk;
672 BT_DBG("parent %p", parent);
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
685 static void l2cap_sock_kill(struct sock *sk)
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
688 return;
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
695 sock_put(sk);
698 static void __l2cap_sock_close(struct sock *sk, int reason)
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
702 switch (sk->sk_state) {
703 case BT_LISTEN:
704 l2cap_sock_cleanup_listen(sk);
705 break;
707 case BT_CONNECTED:
708 case BT_CONFIG:
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
715 } else
716 l2cap_chan_del(sk, reason);
717 break;
719 case BT_CONNECT2:
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
723 __u16 result;
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
727 else
728 result = L2CAP_CR_BAD_PSM;
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
736 } else
737 l2cap_chan_del(sk, reason);
738 break;
740 case BT_CONNECT:
741 case BT_DISCONN:
742 l2cap_chan_del(sk, reason);
743 break;
745 default:
746 sock_set_flag(sk, SOCK_ZAPPED);
747 break;
751 /* Must be called on unlocked socket. */
752 static void l2cap_sock_close(struct sock *sk)
754 l2cap_sock_clear_timer(sk);
755 lock_sock(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
757 release_sock(sk);
758 l2cap_sock_kill(sk);
761 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
765 BT_DBG("sk %p", sk);
767 if (parent) {
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
778 } else {
779 pi->imtu = L2CAP_DEFAULT_MTU;
780 pi->omtu = 0;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
784 pi->role_switch = 0;
785 pi->force_reliable = 0;
788 /* Default config options */
789 pi->conf_len = 0;
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
796 static struct proto l2cap_proto = {
797 .name = "L2CAP",
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
802 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
804 struct sock *sk;
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
807 if (!sk)
808 return NULL;
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
816 sock_reset_flag(sk, SOCK_ZAPPED);
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
823 bt_sock_link(&l2cap_sk_list, sk);
824 return sk;
827 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
828 int kern)
830 struct sock *sk;
832 BT_DBG("sock %p", sock);
834 sock->state = SS_UNCONNECTED;
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
841 return -EPERM;
843 sock->ops = &l2cap_sock_ops;
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
846 if (!sk)
847 return -ENOMEM;
849 l2cap_sock_init(sk, NULL);
850 return 0;
853 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
857 int len, err = 0;
859 BT_DBG("sk %p", sk);
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
862 return -EINVAL;
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
868 if (la.l2_cid)
869 return -EINVAL;
871 lock_sock(sk);
873 if (sk->sk_state != BT_OPEN) {
874 err = -EBADFD;
875 goto done;
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
880 err = -EACCES;
881 goto done;
884 write_lock_bh(&l2cap_sk_list.lock);
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
887 err = -EADDRINUSE;
888 } else {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
900 write_unlock_bh(&l2cap_sk_list.lock);
902 done:
903 release_sock(sk);
904 return err;
907 static int l2cap_do_connect(struct sock *sk)
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
914 __u8 auth_type;
915 int err;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
918 l2cap_pi(sk)->psm);
920 hdev = hci_get_route(dst, src);
921 if (!hdev)
922 return -EHOSTUNREACH;
924 hci_dev_lock_bh(hdev);
926 err = -ENOMEM;
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
932 break;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
935 break;
936 default:
937 auth_type = HCI_AT_NO_BONDING;
938 break;
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
943 else
944 auth_type = HCI_AT_NO_BONDING;
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
948 } else {
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
952 break;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
955 break;
956 default:
957 auth_type = HCI_AT_NO_BONDING;
958 break;
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
964 if (!hcon)
965 goto done;
967 conn = l2cap_conn_add(hcon, 0);
968 if (!conn) {
969 hci_conn_put(hcon);
970 goto done;
973 err = 0;
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
978 l2cap_chan_add(conn, sk, NULL);
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
987 } else
988 l2cap_do_start(sk);
991 done:
992 hci_dev_unlock_bh(hdev);
993 hci_dev_put(hdev);
994 return err;
997 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1001 int len, err = 0;
1003 BT_DBG("sk %p", sk);
1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1013 if (la.l2_cid)
1014 return -EINVAL;
1016 lock_sock(sk);
1018 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1019 err = -EINVAL;
1020 goto done;
1023 switch (l2cap_pi(sk)->mode) {
1024 case L2CAP_MODE_BASIC:
1025 break;
1026 case L2CAP_MODE_ERTM:
1027 case L2CAP_MODE_STREAMING:
1028 if (enable_ertm)
1029 break;
1030 /* fall through */
1031 default:
1032 err = -ENOTSUPP;
1033 goto done;
1036 switch (sk->sk_state) {
1037 case BT_CONNECT:
1038 case BT_CONNECT2:
1039 case BT_CONFIG:
1040 /* Already connecting */
1041 goto wait;
1043 case BT_CONNECTED:
1044 /* Already connected */
1045 goto done;
1047 case BT_OPEN:
1048 case BT_BOUND:
1049 /* Can connect */
1050 break;
1052 default:
1053 err = -EBADFD;
1054 goto done;
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1059 l2cap_pi(sk)->psm = la.l2_psm;
1061 err = l2cap_do_connect(sk);
1062 if (err)
1063 goto done;
1065 wait:
1066 err = bt_sock_wait_state(sk, BT_CONNECTED,
1067 sock_sndtimeo(sk, flags & O_NONBLOCK));
1068 done:
1069 release_sock(sk);
1070 return err;
1073 static int l2cap_sock_listen(struct socket *sock, int backlog)
1075 struct sock *sk = sock->sk;
1076 int err = 0;
1078 BT_DBG("sk %p backlog %d", sk, backlog);
1080 lock_sock(sk);
1082 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1083 err = -EBADFD;
1084 goto done;
1087 switch (l2cap_pi(sk)->mode) {
1088 case L2CAP_MODE_BASIC:
1089 break;
1090 case L2CAP_MODE_ERTM:
1091 case L2CAP_MODE_STREAMING:
1092 if (enable_ertm)
1093 break;
1094 /* fall through */
1095 default:
1096 err = -ENOTSUPP;
1097 goto done;
1100 if (!l2cap_pi(sk)->psm) {
1101 bdaddr_t *src = &bt_sk(sk)->src;
1102 u16 psm;
1104 err = -EINVAL;
1106 write_lock_bh(&l2cap_sk_list.lock);
1108 for (psm = 0x1001; psm < 0x1100; psm += 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1110 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1111 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1112 err = 0;
1113 break;
1116 write_unlock_bh(&l2cap_sk_list.lock);
1118 if (err < 0)
1119 goto done;
1122 sk->sk_max_ack_backlog = backlog;
1123 sk->sk_ack_backlog = 0;
1124 sk->sk_state = BT_LISTEN;
1126 done:
1127 release_sock(sk);
1128 return err;
1131 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1133 DECLARE_WAITQUEUE(wait, current);
1134 struct sock *sk = sock->sk, *nsk;
1135 long timeo;
1136 int err = 0;
1138 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1140 if (sk->sk_state != BT_LISTEN) {
1141 err = -EBADFD;
1142 goto done;
1145 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if (!timeo) {
1154 err = -EAGAIN;
1155 break;
1158 release_sock(sk);
1159 timeo = schedule_timeout(timeo);
1160 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1162 if (sk->sk_state != BT_LISTEN) {
1163 err = -EBADFD;
1164 break;
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1169 break;
1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk_sleep(sk), &wait);
1175 if (err)
1176 goto done;
1178 newsock->state = SS_CONNECTED;
1180 BT_DBG("new socket %p", nsk);
1182 done:
1183 release_sock(sk);
1184 return err;
1187 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1189 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1190 struct sock *sk = sock->sk;
1192 BT_DBG("sock %p, sk %p", sock, sk);
1194 addr->sa_family = AF_BLUETOOTH;
1195 *len = sizeof(struct sockaddr_l2);
1197 if (peer) {
1198 la->l2_psm = l2cap_pi(sk)->psm;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1201 } else {
1202 la->l2_psm = l2cap_pi(sk)->sport;
1203 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1204 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1207 return 0;
1210 static void l2cap_monitor_timeout(unsigned long arg)
1212 struct sock *sk = (void *) arg;
1213 u16 control;
1215 bh_lock_sock(sk);
1216 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1217 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1218 bh_unlock_sock(sk);
1219 return;
1222 l2cap_pi(sk)->retry_count++;
1223 __mod_monitor_timer();
1225 control = L2CAP_CTRL_POLL;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1227 bh_unlock_sock(sk);
1230 static void l2cap_retrans_timeout(unsigned long arg)
1232 struct sock *sk = (void *) arg;
1233 u16 control;
1235 bh_lock_sock(sk);
1236 l2cap_pi(sk)->retry_count = 1;
1237 __mod_monitor_timer();
1239 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1241 control = L2CAP_CTRL_POLL;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1243 bh_unlock_sock(sk);
1246 static void l2cap_drop_acked_frames(struct sock *sk)
1248 struct sk_buff *skb;
1250 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1251 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1252 break;
1254 skb = skb_dequeue(TX_QUEUE(sk));
1255 kfree_skb(skb);
1257 l2cap_pi(sk)->unacked_frames--;
1260 if (!l2cap_pi(sk)->unacked_frames)
1261 del_timer(&l2cap_pi(sk)->retrans_timer);
1263 return;
1266 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1268 struct l2cap_pinfo *pi = l2cap_pi(sk);
1269 int err;
1271 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1273 err = hci_send_acl(pi->conn->hcon, skb, 0);
1274 if (err < 0)
1275 kfree_skb(skb);
1277 return err;
1280 static int l2cap_streaming_send(struct sock *sk)
1282 struct sk_buff *skb, *tx_skb;
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1284 u16 control, fcs;
1285 int err;
1287 while ((skb = sk->sk_send_head)) {
1288 tx_skb = skb_clone(skb, GFP_ATOMIC);
1290 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1291 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1292 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1294 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1296 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1299 err = l2cap_do_send(sk, tx_skb);
1300 if (err < 0) {
1301 l2cap_send_disconn_req(pi->conn, sk);
1302 return err;
1305 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1307 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1308 sk->sk_send_head = NULL;
1309 else
1310 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1312 skb = skb_dequeue(TX_QUEUE(sk));
1313 kfree_skb(skb);
1315 return 0;
1318 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb, *tx_skb;
1322 u16 control, fcs;
1323 int err;
1325 skb = skb_peek(TX_QUEUE(sk));
1326 do {
1327 if (bt_cb(skb)->tx_seq != tx_seq) {
1328 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1329 break;
1330 skb = skb_queue_next(TX_QUEUE(sk), skb);
1331 continue;
1334 if (pi->remote_max_tx &&
1335 bt_cb(skb)->retries == pi->remote_max_tx) {
1336 l2cap_send_disconn_req(pi->conn, sk);
1337 break;
1340 tx_skb = skb_clone(skb, GFP_ATOMIC);
1341 bt_cb(skb)->retries++;
1342 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1343 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1344 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1345 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1347 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1348 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1349 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1352 err = l2cap_do_send(sk, tx_skb);
1353 if (err < 0) {
1354 l2cap_send_disconn_req(pi->conn, sk);
1355 return err;
1357 break;
1358 } while(1);
1359 return 0;
1362 static int l2cap_ertm_send(struct sock *sk)
1364 struct sk_buff *skb, *tx_skb;
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1366 u16 control, fcs;
1367 int err;
1369 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1370 return 0;
1372 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1373 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1375 if (pi->remote_max_tx &&
1376 bt_cb(skb)->retries == pi->remote_max_tx) {
1377 l2cap_send_disconn_req(pi->conn, sk);
1378 break;
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1383 bt_cb(skb)->retries++;
1385 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1386 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1387 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1388 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1391 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1393 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1396 err = l2cap_do_send(sk, tx_skb);
1397 if (err < 0) {
1398 l2cap_send_disconn_req(pi->conn, sk);
1399 return err;
1401 __mod_retrans_timer();
1403 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1404 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1406 pi->unacked_frames++;
1408 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1409 sk->sk_send_head = NULL;
1410 else
1411 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1414 return 0;
1417 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1419 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1420 struct sk_buff **frag;
1421 int err, sent = 0;
1423 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1424 return -EFAULT;
1427 sent += count;
1428 len -= count;
1430 /* Continuation fragments (no L2CAP header) */
1431 frag = &skb_shinfo(skb)->frag_list;
1432 while (len) {
1433 count = min_t(unsigned int, conn->mtu, len);
1435 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1436 if (!*frag)
1437 return -EFAULT;
1438 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1439 return -EFAULT;
1441 sent += count;
1442 len -= count;
1444 frag = &(*frag)->next;
1447 return sent;
1450 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1452 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1453 struct sk_buff *skb;
1454 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1455 struct l2cap_hdr *lh;
1457 BT_DBG("sk %p len %d", sk, (int)len);
1459 count = min_t(unsigned int, (conn->mtu - hlen), len);
1460 skb = bt_skb_send_alloc(sk, count + hlen,
1461 msg->msg_flags & MSG_DONTWAIT, &err);
1462 if (!skb)
1463 return ERR_PTR(-ENOMEM);
1465 /* Create L2CAP header */
1466 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1467 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1468 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1469 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1471 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1472 if (unlikely(err < 0)) {
1473 kfree_skb(skb);
1474 return ERR_PTR(err);
1476 return skb;
1479 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1481 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1482 struct sk_buff *skb;
1483 int err, count, hlen = L2CAP_HDR_SIZE;
1484 struct l2cap_hdr *lh;
1486 BT_DBG("sk %p len %d", sk, (int)len);
1488 count = min_t(unsigned int, (conn->mtu - hlen), len);
1489 skb = bt_skb_send_alloc(sk, count + hlen,
1490 msg->msg_flags & MSG_DONTWAIT, &err);
1491 if (!skb)
1492 return ERR_PTR(-ENOMEM);
1494 /* Create L2CAP header */
1495 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1496 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1497 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1499 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1500 if (unlikely(err < 0)) {
1501 kfree_skb(skb);
1502 return ERR_PTR(err);
1504 return skb;
1507 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1509 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1512 struct l2cap_hdr *lh;
1514 BT_DBG("sk %p len %d", sk, (int)len);
1516 if (sdulen)
1517 hlen += 2;
1519 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1520 hlen += 2;
1522 count = min_t(unsigned int, (conn->mtu - hlen), len);
1523 skb = bt_skb_send_alloc(sk, count + hlen,
1524 msg->msg_flags & MSG_DONTWAIT, &err);
1525 if (!skb)
1526 return ERR_PTR(-ENOMEM);
1528 /* Create L2CAP header */
1529 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1531 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1532 put_unaligned_le16(control, skb_put(skb, 2));
1533 if (sdulen)
1534 put_unaligned_le16(sdulen, skb_put(skb, 2));
1536 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1537 if (unlikely(err < 0)) {
1538 kfree_skb(skb);
1539 return ERR_PTR(err);
1542 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1543 put_unaligned_le16(0, skb_put(skb, 2));
1545 bt_cb(skb)->retries = 0;
1546 return skb;
1549 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1551 struct l2cap_pinfo *pi = l2cap_pi(sk);
1552 struct sk_buff *skb;
1553 struct sk_buff_head sar_queue;
1554 u16 control;
1555 size_t size = 0;
1557 __skb_queue_head_init(&sar_queue);
1558 control = L2CAP_SDU_START;
1559 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1560 if (IS_ERR(skb))
1561 return PTR_ERR(skb);
1563 __skb_queue_tail(&sar_queue, skb);
1564 len -= pi->max_pdu_size;
1565 size +=pi->max_pdu_size;
1566 control = 0;
1568 while (len > 0) {
1569 size_t buflen;
1571 if (len > pi->max_pdu_size) {
1572 control |= L2CAP_SDU_CONTINUE;
1573 buflen = pi->max_pdu_size;
1574 } else {
1575 control |= L2CAP_SDU_END;
1576 buflen = len;
1579 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1580 if (IS_ERR(skb)) {
1581 skb_queue_purge(&sar_queue);
1582 return PTR_ERR(skb);
1585 __skb_queue_tail(&sar_queue, skb);
1586 len -= buflen;
1587 size += buflen;
1588 control = 0;
1590 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1591 if (sk->sk_send_head == NULL)
1592 sk->sk_send_head = sar_queue.next;
1594 return size;
1597 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1599 struct sock *sk = sock->sk;
1600 struct l2cap_pinfo *pi = l2cap_pi(sk);
1601 struct sk_buff *skb;
1602 u16 control;
1603 int err;
1605 BT_DBG("sock %p, sk %p", sock, sk);
1607 err = sock_error(sk);
1608 if (err)
1609 return err;
1611 if (msg->msg_flags & MSG_OOB)
1612 return -EOPNOTSUPP;
1614 lock_sock(sk);
1616 if (sk->sk_state != BT_CONNECTED) {
1617 err = -ENOTCONN;
1618 goto done;
1621 /* Connectionless channel */
1622 if (sk->sk_type == SOCK_DGRAM) {
1623 skb = l2cap_create_connless_pdu(sk, msg, len);
1624 if (IS_ERR(skb))
1625 err = PTR_ERR(skb);
1626 else
1627 err = l2cap_do_send(sk, skb);
1628 goto done;
1631 switch (pi->mode) {
1632 case L2CAP_MODE_BASIC:
1633 /* Check outgoing MTU */
1634 if (len > pi->omtu) {
1635 err = -EINVAL;
1636 goto done;
1639 /* Create a basic PDU */
1640 skb = l2cap_create_basic_pdu(sk, msg, len);
1641 if (IS_ERR(skb)) {
1642 err = PTR_ERR(skb);
1643 goto done;
1646 err = l2cap_do_send(sk, skb);
1647 if (!err)
1648 err = len;
1649 break;
1651 case L2CAP_MODE_ERTM:
1652 case L2CAP_MODE_STREAMING:
1653 /* Entire SDU fits into one PDU */
1654 if (len <= pi->max_pdu_size) {
1655 control = L2CAP_SDU_UNSEGMENTED;
1656 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1657 if (IS_ERR(skb)) {
1658 err = PTR_ERR(skb);
1659 goto done;
1661 __skb_queue_tail(TX_QUEUE(sk), skb);
1662 if (sk->sk_send_head == NULL)
1663 sk->sk_send_head = skb;
1664 } else {
1665 /* Segment SDU into multiples PDUs */
1666 err = l2cap_sar_segment_sdu(sk, msg, len);
1667 if (err < 0)
1668 goto done;
1671 if (pi->mode == L2CAP_MODE_STREAMING)
1672 err = l2cap_streaming_send(sk);
1673 else
1674 err = l2cap_ertm_send(sk);
1676 if (!err)
1677 err = len;
1678 break;
1680 default:
1681 BT_DBG("bad state %1.1x", pi->mode);
1682 err = -EINVAL;
1685 done:
1686 release_sock(sk);
1687 return err;
1690 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1692 struct sock *sk = sock->sk;
1694 lock_sock(sk);
1696 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1697 struct l2cap_conn_rsp rsp;
1699 sk->sk_state = BT_CONFIG;
1701 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1702 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1703 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1704 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1705 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1706 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1708 release_sock(sk);
1709 return 0;
1712 release_sock(sk);
1714 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1717 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1719 struct sock *sk = sock->sk;
1720 struct l2cap_options opts;
1721 int len, err = 0;
1722 u32 opt;
1724 BT_DBG("sk %p", sk);
1726 lock_sock(sk);
1728 switch (optname) {
1729 case L2CAP_OPTIONS:
1730 opts.imtu = l2cap_pi(sk)->imtu;
1731 opts.omtu = l2cap_pi(sk)->omtu;
1732 opts.flush_to = l2cap_pi(sk)->flush_to;
1733 opts.mode = l2cap_pi(sk)->mode;
1734 opts.fcs = l2cap_pi(sk)->fcs;
1736 len = min_t(unsigned int, sizeof(opts), optlen);
1737 if (copy_from_user((char *) &opts, optval, len)) {
1738 err = -EFAULT;
1739 break;
1742 l2cap_pi(sk)->imtu = opts.imtu;
1743 l2cap_pi(sk)->omtu = opts.omtu;
1744 l2cap_pi(sk)->mode = opts.mode;
1745 l2cap_pi(sk)->fcs = opts.fcs;
1746 break;
1748 case L2CAP_LM:
1749 if (get_user(opt, (u32 __user *) optval)) {
1750 err = -EFAULT;
1751 break;
1754 if (opt & L2CAP_LM_AUTH)
1755 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1756 if (opt & L2CAP_LM_ENCRYPT)
1757 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1758 if (opt & L2CAP_LM_SECURE)
1759 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1761 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1762 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1763 break;
1765 default:
1766 err = -ENOPROTOOPT;
1767 break;
1770 release_sock(sk);
1771 return err;
1774 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1776 struct sock *sk = sock->sk;
1777 struct bt_security sec;
1778 int len, err = 0;
1779 u32 opt;
1781 BT_DBG("sk %p", sk);
1783 if (level == SOL_L2CAP)
1784 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1786 if (level != SOL_BLUETOOTH)
1787 return -ENOPROTOOPT;
1789 lock_sock(sk);
1791 switch (optname) {
1792 case BT_SECURITY:
1793 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1794 err = -EINVAL;
1795 break;
1798 sec.level = BT_SECURITY_LOW;
1800 len = min_t(unsigned int, sizeof(sec), optlen);
1801 if (copy_from_user((char *) &sec, optval, len)) {
1802 err = -EFAULT;
1803 break;
1806 if (sec.level < BT_SECURITY_LOW ||
1807 sec.level > BT_SECURITY_HIGH) {
1808 err = -EINVAL;
1809 break;
1812 l2cap_pi(sk)->sec_level = sec.level;
1813 break;
1815 case BT_DEFER_SETUP:
1816 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1817 err = -EINVAL;
1818 break;
1821 if (get_user(opt, (u32 __user *) optval)) {
1822 err = -EFAULT;
1823 break;
1826 bt_sk(sk)->defer_setup = opt;
1827 break;
1829 default:
1830 err = -ENOPROTOOPT;
1831 break;
1834 release_sock(sk);
1835 return err;
1838 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1840 struct sock *sk = sock->sk;
1841 struct l2cap_options opts;
1842 struct l2cap_conninfo cinfo;
1843 int len, err = 0;
1844 u32 opt;
1846 BT_DBG("sk %p", sk);
1848 if (get_user(len, optlen))
1849 return -EFAULT;
1851 lock_sock(sk);
1853 switch (optname) {
1854 case L2CAP_OPTIONS:
1855 opts.imtu = l2cap_pi(sk)->imtu;
1856 opts.omtu = l2cap_pi(sk)->omtu;
1857 opts.flush_to = l2cap_pi(sk)->flush_to;
1858 opts.mode = l2cap_pi(sk)->mode;
1859 opts.fcs = l2cap_pi(sk)->fcs;
1861 len = min_t(unsigned int, len, sizeof(opts));
1862 if (copy_to_user(optval, (char *) &opts, len))
1863 err = -EFAULT;
1865 break;
1867 case L2CAP_LM:
1868 switch (l2cap_pi(sk)->sec_level) {
1869 case BT_SECURITY_LOW:
1870 opt = L2CAP_LM_AUTH;
1871 break;
1872 case BT_SECURITY_MEDIUM:
1873 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1874 break;
1875 case BT_SECURITY_HIGH:
1876 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1877 L2CAP_LM_SECURE;
1878 break;
1879 default:
1880 opt = 0;
1881 break;
1884 if (l2cap_pi(sk)->role_switch)
1885 opt |= L2CAP_LM_MASTER;
1887 if (l2cap_pi(sk)->force_reliable)
1888 opt |= L2CAP_LM_RELIABLE;
1890 if (put_user(opt, (u32 __user *) optval))
1891 err = -EFAULT;
1892 break;
1894 case L2CAP_CONNINFO:
1895 if (sk->sk_state != BT_CONNECTED &&
1896 !(sk->sk_state == BT_CONNECT2 &&
1897 bt_sk(sk)->defer_setup)) {
1898 err = -ENOTCONN;
1899 break;
1902 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1903 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1905 len = min_t(unsigned int, len, sizeof(cinfo));
1906 if (copy_to_user(optval, (char *) &cinfo, len))
1907 err = -EFAULT;
1909 break;
1911 default:
1912 err = -ENOPROTOOPT;
1913 break;
1916 release_sock(sk);
1917 return err;
1920 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1922 struct sock *sk = sock->sk;
1923 struct bt_security sec;
1924 int len, err = 0;
1926 BT_DBG("sk %p", sk);
1928 if (level == SOL_L2CAP)
1929 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1931 if (level != SOL_BLUETOOTH)
1932 return -ENOPROTOOPT;
1934 if (get_user(len, optlen))
1935 return -EFAULT;
1937 lock_sock(sk);
1939 switch (optname) {
1940 case BT_SECURITY:
1941 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1942 err = -EINVAL;
1943 break;
1946 sec.level = l2cap_pi(sk)->sec_level;
1948 len = min_t(unsigned int, len, sizeof(sec));
1949 if (copy_to_user(optval, (char *) &sec, len))
1950 err = -EFAULT;
1952 break;
1954 case BT_DEFER_SETUP:
1955 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1956 err = -EINVAL;
1957 break;
1960 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1961 err = -EFAULT;
1963 break;
1965 default:
1966 err = -ENOPROTOOPT;
1967 break;
1970 release_sock(sk);
1971 return err;
1974 static int l2cap_sock_shutdown(struct socket *sock, int how)
1976 struct sock *sk = sock->sk;
1977 int err = 0;
1979 BT_DBG("sock %p, sk %p", sock, sk);
1981 if (!sk)
1982 return 0;
1984 lock_sock(sk);
1985 if (!sk->sk_shutdown) {
1986 sk->sk_shutdown = SHUTDOWN_MASK;
1987 l2cap_sock_clear_timer(sk);
1988 __l2cap_sock_close(sk, 0);
1990 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1991 err = bt_sock_wait_state(sk, BT_CLOSED,
1992 sk->sk_lingertime);
1994 release_sock(sk);
1995 return err;
1998 static int l2cap_sock_release(struct socket *sock)
2000 struct sock *sk = sock->sk;
2001 int err;
2003 BT_DBG("sock %p, sk %p", sock, sk);
2005 if (!sk)
2006 return 0;
2008 err = l2cap_sock_shutdown(sock, 2);
2010 sock_orphan(sk);
2011 l2cap_sock_kill(sk);
2012 return err;
2015 static void l2cap_chan_ready(struct sock *sk)
2017 struct sock *parent = bt_sk(sk)->parent;
2019 BT_DBG("sk %p, parent %p", sk, parent);
2021 l2cap_pi(sk)->conf_state = 0;
2022 l2cap_sock_clear_timer(sk);
2024 if (!parent) {
2025 /* Outgoing channel.
2026 * Wake up socket sleeping on connect.
2028 sk->sk_state = BT_CONNECTED;
2029 sk->sk_state_change(sk);
2030 } else {
2031 /* Incoming channel.
2032 * Wake up socket sleeping on accept.
2034 parent->sk_data_ready(parent, 0);
2038 /* Copy frame to all raw sockets on that connection */
2039 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2041 struct l2cap_chan_list *l = &conn->chan_list;
2042 struct sk_buff *nskb;
2043 struct sock *sk;
2045 BT_DBG("conn %p", conn);
2047 read_lock(&l->lock);
2048 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2049 if (sk->sk_type != SOCK_RAW)
2050 continue;
2052 /* Don't send frame to the socket it came from */
2053 if (skb->sk == sk)
2054 continue;
2055 nskb = skb_clone(skb, GFP_ATOMIC);
2056 if (!nskb)
2057 continue;
2059 if (sock_queue_rcv_skb(sk, nskb))
2060 kfree_skb(nskb);
2062 read_unlock(&l->lock);
2065 /* ---- L2CAP signalling commands ---- */
2066 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2067 u8 code, u8 ident, u16 dlen, void *data)
2069 struct sk_buff *skb, **frag;
2070 struct l2cap_cmd_hdr *cmd;
2071 struct l2cap_hdr *lh;
2072 int len, count;
2074 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2075 conn, code, ident, dlen);
2077 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2078 count = min_t(unsigned int, conn->mtu, len);
2080 skb = bt_skb_alloc(count, GFP_ATOMIC);
2081 if (!skb)
2082 return NULL;
2084 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2085 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2086 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2088 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2089 cmd->code = code;
2090 cmd->ident = ident;
2091 cmd->len = cpu_to_le16(dlen);
2093 if (dlen) {
2094 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2095 memcpy(skb_put(skb, count), data, count);
2096 data += count;
2099 len -= skb->len;
2101 /* Continuation fragments (no L2CAP header) */
2102 frag = &skb_shinfo(skb)->frag_list;
2103 while (len) {
2104 count = min_t(unsigned int, conn->mtu, len);
2106 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2107 if (!*frag)
2108 goto fail;
2110 memcpy(skb_put(*frag, count), data, count);
2112 len -= count;
2113 data += count;
2115 frag = &(*frag)->next;
2118 return skb;
2120 fail:
2121 kfree_skb(skb);
2122 return NULL;
2125 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2127 struct l2cap_conf_opt *opt = *ptr;
2128 int len;
2130 len = L2CAP_CONF_OPT_SIZE + opt->len;
2131 *ptr += len;
2133 *type = opt->type;
2134 *olen = opt->len;
2136 switch (opt->len) {
2137 case 1:
2138 *val = *((u8 *) opt->val);
2139 break;
2141 case 2:
2142 *val = __le16_to_cpu(*((__le16 *) opt->val));
2143 break;
2145 case 4:
2146 *val = __le32_to_cpu(*((__le32 *) opt->val));
2147 break;
2149 default:
2150 *val = (unsigned long) opt->val;
2151 break;
2154 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2155 return len;
2158 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2160 struct l2cap_conf_opt *opt = *ptr;
2162 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2164 opt->type = type;
2165 opt->len = len;
2167 switch (len) {
2168 case 1:
2169 *((u8 *) opt->val) = val;
2170 break;
2172 case 2:
2173 *((__le16 *) opt->val) = cpu_to_le16(val);
2174 break;
2176 case 4:
2177 *((__le32 *) opt->val) = cpu_to_le32(val);
2178 break;
2180 default:
2181 memcpy(opt->val, (void *) val, len);
2182 break;
2185 *ptr += L2CAP_CONF_OPT_SIZE + len;
2188 static inline void l2cap_ertm_init(struct sock *sk)
2190 l2cap_pi(sk)->expected_ack_seq = 0;
2191 l2cap_pi(sk)->unacked_frames = 0;
2192 l2cap_pi(sk)->buffer_seq = 0;
2193 l2cap_pi(sk)->num_to_ack = 0;
2195 setup_timer(&l2cap_pi(sk)->retrans_timer,
2196 l2cap_retrans_timeout, (unsigned long) sk);
2197 setup_timer(&l2cap_pi(sk)->monitor_timer,
2198 l2cap_monitor_timeout, (unsigned long) sk);
2200 __skb_queue_head_init(SREJ_QUEUE(sk));
2203 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2205 u32 local_feat_mask = l2cap_feat_mask;
2206 if (enable_ertm)
2207 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2209 switch (mode) {
2210 case L2CAP_MODE_ERTM:
2211 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2212 case L2CAP_MODE_STREAMING:
2213 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2214 default:
2215 return 0x00;
2219 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2221 switch (mode) {
2222 case L2CAP_MODE_STREAMING:
2223 case L2CAP_MODE_ERTM:
2224 if (l2cap_mode_supported(mode, remote_feat_mask))
2225 return mode;
2226 /* fall through */
2227 default:
2228 return L2CAP_MODE_BASIC;
2232 static int l2cap_build_conf_req(struct sock *sk, void *data)
2234 struct l2cap_pinfo *pi = l2cap_pi(sk);
2235 struct l2cap_conf_req *req = data;
2236 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2237 void *ptr = req->data;
2239 BT_DBG("sk %p", sk);
2241 if (pi->num_conf_req || pi->num_conf_rsp)
2242 goto done;
2244 switch (pi->mode) {
2245 case L2CAP_MODE_STREAMING:
2246 case L2CAP_MODE_ERTM:
2247 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2248 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2249 l2cap_send_disconn_req(pi->conn, sk);
2250 break;
2251 default:
2252 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2253 break;
2256 done:
2257 switch (pi->mode) {
2258 case L2CAP_MODE_BASIC:
2259 if (pi->imtu != L2CAP_DEFAULT_MTU)
2260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2261 break;
2263 case L2CAP_MODE_ERTM:
2264 rfc.mode = L2CAP_MODE_ERTM;
2265 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2266 rfc.max_transmit = max_transmit;
2267 rfc.retrans_timeout = 0;
2268 rfc.monitor_timeout = 0;
2269 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2270 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2271 rfc.max_pdu_size = pi->conn->mtu - 10;
2273 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2274 sizeof(rfc), (unsigned long) &rfc);
2276 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2277 break;
2279 if (pi->fcs == L2CAP_FCS_NONE ||
2280 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2281 pi->fcs = L2CAP_FCS_NONE;
2282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2284 break;
2286 case L2CAP_MODE_STREAMING:
2287 rfc.mode = L2CAP_MODE_STREAMING;
2288 rfc.txwin_size = 0;
2289 rfc.max_transmit = 0;
2290 rfc.retrans_timeout = 0;
2291 rfc.monitor_timeout = 0;
2292 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2293 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2294 rfc.max_pdu_size = pi->conn->mtu - 10;
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2297 sizeof(rfc), (unsigned long) &rfc);
2299 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2300 break;
2302 if (pi->fcs == L2CAP_FCS_NONE ||
2303 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2304 pi->fcs = L2CAP_FCS_NONE;
2305 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2307 break;
2310 /* FIXME: Need actual value of the flush timeout */
2311 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2312 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2314 req->dcid = cpu_to_le16(pi->dcid);
2315 req->flags = cpu_to_le16(0);
2317 return ptr - data;
2320 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2322 struct l2cap_pinfo *pi = l2cap_pi(sk);
2323 struct l2cap_conf_rsp *rsp = data;
2324 void *ptr = rsp->data;
2325 void *req = pi->conf_req;
2326 int len = pi->conf_len;
2327 int type, hint, olen;
2328 unsigned long val;
2329 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2330 u16 mtu = L2CAP_DEFAULT_MTU;
2331 u16 result = L2CAP_CONF_SUCCESS;
2333 BT_DBG("sk %p", sk);
2335 while (len >= L2CAP_CONF_OPT_SIZE) {
2336 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2338 hint = type & L2CAP_CONF_HINT;
2339 type &= L2CAP_CONF_MASK;
2341 switch (type) {
2342 case L2CAP_CONF_MTU:
2343 mtu = val;
2344 break;
2346 case L2CAP_CONF_FLUSH_TO:
2347 pi->flush_to = val;
2348 break;
2350 case L2CAP_CONF_QOS:
2351 break;
2353 case L2CAP_CONF_RFC:
2354 if (olen == sizeof(rfc))
2355 memcpy(&rfc, (void *) val, olen);
2356 break;
2358 case L2CAP_CONF_FCS:
2359 if (val == L2CAP_FCS_NONE)
2360 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2362 break;
2364 default:
2365 if (hint)
2366 break;
2368 result = L2CAP_CONF_UNKNOWN;
2369 *((u8 *) ptr++) = type;
2370 break;
2374 if (pi->num_conf_rsp || pi->num_conf_req)
2375 goto done;
2377 switch (pi->mode) {
2378 case L2CAP_MODE_STREAMING:
2379 case L2CAP_MODE_ERTM:
2380 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2381 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2382 return -ECONNREFUSED;
2383 break;
2384 default:
2385 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2386 break;
2389 done:
2390 if (pi->mode != rfc.mode) {
2391 result = L2CAP_CONF_UNACCEPT;
2392 rfc.mode = pi->mode;
2394 if (pi->num_conf_rsp == 1)
2395 return -ECONNREFUSED;
2397 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2398 sizeof(rfc), (unsigned long) &rfc);
2402 if (result == L2CAP_CONF_SUCCESS) {
2403 /* Configure output options and let the other side know
2404 * which ones we don't like. */
2406 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2407 result = L2CAP_CONF_UNACCEPT;
2408 else {
2409 pi->omtu = mtu;
2410 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2414 switch (rfc.mode) {
2415 case L2CAP_MODE_BASIC:
2416 pi->fcs = L2CAP_FCS_NONE;
2417 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2418 break;
2420 case L2CAP_MODE_ERTM:
2421 pi->remote_tx_win = rfc.txwin_size;
2422 pi->remote_max_tx = rfc.max_transmit;
2423 pi->max_pdu_size = rfc.max_pdu_size;
2425 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2426 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2428 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2431 sizeof(rfc), (unsigned long) &rfc);
2433 break;
2435 case L2CAP_MODE_STREAMING:
2436 pi->remote_tx_win = rfc.txwin_size;
2437 pi->max_pdu_size = rfc.max_pdu_size;
2439 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2441 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2442 sizeof(rfc), (unsigned long) &rfc);
2444 break;
2446 default:
2447 result = L2CAP_CONF_UNACCEPT;
2449 memset(&rfc, 0, sizeof(rfc));
2450 rfc.mode = pi->mode;
2453 if (result == L2CAP_CONF_SUCCESS)
2454 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2456 rsp->scid = cpu_to_le16(pi->dcid);
2457 rsp->result = cpu_to_le16(result);
2458 rsp->flags = cpu_to_le16(0x0000);
2460 return ptr - data;
2463 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2465 struct l2cap_pinfo *pi = l2cap_pi(sk);
2466 struct l2cap_conf_req *req = data;
2467 void *ptr = req->data;
2468 int type, olen;
2469 unsigned long val;
2470 struct l2cap_conf_rfc rfc;
2472 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2474 while (len >= L2CAP_CONF_OPT_SIZE) {
2475 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2477 switch (type) {
2478 case L2CAP_CONF_MTU:
2479 if (val < L2CAP_DEFAULT_MIN_MTU) {
2480 *result = L2CAP_CONF_UNACCEPT;
2481 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2482 } else
2483 pi->omtu = val;
2484 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2485 break;
2487 case L2CAP_CONF_FLUSH_TO:
2488 pi->flush_to = val;
2489 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2490 2, pi->flush_to);
2491 break;
2493 case L2CAP_CONF_RFC:
2494 if (olen == sizeof(rfc))
2495 memcpy(&rfc, (void *)val, olen);
2497 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2498 rfc.mode != pi->mode)
2499 return -ECONNREFUSED;
2501 pi->mode = rfc.mode;
2502 pi->fcs = 0;
2504 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2505 sizeof(rfc), (unsigned long) &rfc);
2506 break;
2510 if (*result == L2CAP_CONF_SUCCESS) {
2511 switch (rfc.mode) {
2512 case L2CAP_MODE_ERTM:
2513 pi->remote_tx_win = rfc.txwin_size;
2514 pi->retrans_timeout = rfc.retrans_timeout;
2515 pi->monitor_timeout = rfc.monitor_timeout;
2516 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2517 break;
2518 case L2CAP_MODE_STREAMING:
2519 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2520 break;
2524 req->dcid = cpu_to_le16(pi->dcid);
2525 req->flags = cpu_to_le16(0x0000);
2527 return ptr - data;
2530 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2532 struct l2cap_conf_rsp *rsp = data;
2533 void *ptr = rsp->data;
2535 BT_DBG("sk %p", sk);
2537 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2538 rsp->result = cpu_to_le16(result);
2539 rsp->flags = cpu_to_le16(flags);
2541 return ptr - data;
2544 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2546 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2548 if (rej->reason != 0x0000)
2549 return 0;
2551 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2552 cmd->ident == conn->info_ident) {
2553 del_timer(&conn->info_timer);
2555 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2556 conn->info_ident = 0;
2558 l2cap_conn_start(conn);
2561 return 0;
2564 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2566 struct l2cap_chan_list *list = &conn->chan_list;
2567 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2568 struct l2cap_conn_rsp rsp;
2569 struct sock *sk, *parent;
2570 int result, status = L2CAP_CS_NO_INFO;
2572 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2573 __le16 psm = req->psm;
2575 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2577 /* Check if we have socket listening on psm */
2578 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2579 if (!parent) {
2580 result = L2CAP_CR_BAD_PSM;
2581 goto sendresp;
2584 /* Check if the ACL is secure enough (if not SDP) */
2585 if (psm != cpu_to_le16(0x0001) &&
2586 !hci_conn_check_link_mode(conn->hcon)) {
2587 conn->disc_reason = 0x05;
2588 result = L2CAP_CR_SEC_BLOCK;
2589 goto response;
2592 result = L2CAP_CR_NO_MEM;
2594 /* Check for backlog size */
2595 if (sk_acceptq_is_full(parent)) {
2596 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2597 goto response;
2600 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2601 if (!sk)
2602 goto response;
2604 write_lock_bh(&list->lock);
2606 /* Check if we already have channel with that dcid */
2607 if (__l2cap_get_chan_by_dcid(list, scid)) {
2608 write_unlock_bh(&list->lock);
2609 sock_set_flag(sk, SOCK_ZAPPED);
2610 l2cap_sock_kill(sk);
2611 goto response;
2614 hci_conn_hold(conn->hcon);
2616 l2cap_sock_init(sk, parent);
2617 bacpy(&bt_sk(sk)->src, conn->src);
2618 bacpy(&bt_sk(sk)->dst, conn->dst);
2619 l2cap_pi(sk)->psm = psm;
2620 l2cap_pi(sk)->dcid = scid;
2622 __l2cap_chan_add(conn, sk, parent);
2623 dcid = l2cap_pi(sk)->scid;
2625 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2627 l2cap_pi(sk)->ident = cmd->ident;
2629 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2630 if (l2cap_check_security(sk)) {
2631 if (bt_sk(sk)->defer_setup) {
2632 sk->sk_state = BT_CONNECT2;
2633 result = L2CAP_CR_PEND;
2634 status = L2CAP_CS_AUTHOR_PEND;
2635 parent->sk_data_ready(parent, 0);
2636 } else {
2637 sk->sk_state = BT_CONFIG;
2638 result = L2CAP_CR_SUCCESS;
2639 status = L2CAP_CS_NO_INFO;
2641 } else {
2642 sk->sk_state = BT_CONNECT2;
2643 result = L2CAP_CR_PEND;
2644 status = L2CAP_CS_AUTHEN_PEND;
2646 } else {
2647 sk->sk_state = BT_CONNECT2;
2648 result = L2CAP_CR_PEND;
2649 status = L2CAP_CS_NO_INFO;
2652 write_unlock_bh(&list->lock);
2654 response:
2655 bh_unlock_sock(parent);
2657 sendresp:
2658 rsp.scid = cpu_to_le16(scid);
2659 rsp.dcid = cpu_to_le16(dcid);
2660 rsp.result = cpu_to_le16(result);
2661 rsp.status = cpu_to_le16(status);
2662 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2664 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2665 struct l2cap_info_req info;
2666 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2668 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2669 conn->info_ident = l2cap_get_ident(conn);
2671 mod_timer(&conn->info_timer, jiffies +
2672 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2674 l2cap_send_cmd(conn, conn->info_ident,
2675 L2CAP_INFO_REQ, sizeof(info), &info);
2678 return 0;
/*
 * Handle an incoming L2CAP Connect Response.
 *
 * On success the channel moves to BT_CONFIG and we immediately send our
 * first Configure Request; a pending result just records the state; any
 * other result tears the channel down with ECONNREFUSED.
 */
2681 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2683 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2684 u16 scid, dcid, result, status;
2685 struct sock *sk;
2686 u8 req[128];
2688 scid = __le16_to_cpu(rsp->scid);
2689 dcid = __le16_to_cpu(rsp->dcid);
2690 result = __le16_to_cpu(rsp->result);
2691 status = __le16_to_cpu(rsp->status);
2693 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Look the channel up by our source CID, or fall back to the command
 * ident when the peer did not echo a CID.  The socket comes back
 * locked (released via bh_unlock_sock below). */
2695 if (scid) {
2696 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2697 if (!sk)
2698 return 0;
2699 } else {
2700 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2701 if (!sk)
2702 return 0;
2705 switch (result) {
2706 case L2CAP_CR_SUCCESS:
/* Connection accepted: enter configuration phase and send our
 * Configure Request right away. */
2707 sk->sk_state = BT_CONFIG;
2708 l2cap_pi(sk)->ident = 0;
2709 l2cap_pi(sk)->dcid = dcid;
2710 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2712 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2714 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2715 l2cap_build_conf_req(sk, req), req);
2716 l2cap_pi(sk)->num_conf_req++;
2717 break;
2719 case L2CAP_CR_PEND:
2720 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2721 break;
2723 default:
/* Any other result is a refusal: delete the channel. */
2724 l2cap_chan_del(sk, ECONNREFUSED);
2725 break;
2728 bh_unlock_sock(sk);
2729 return 0;
/*
 * Handle an incoming Configure Request.
 *
 * Requests may be fragmented (continuation flag, bit 0 of `flags`);
 * fragments accumulate in conf_req until the final one arrives, then
 * the combined option list is parsed and answered.  Returns -ENOENT
 * when no channel matches dcid so the caller sends a Command Reject.
 */
2732 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2734 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2735 u16 dcid, flags;
2736 u8 rsp[64];
2737 struct sock *sk;
2738 int len;
2740 dcid = __le16_to_cpu(req->dcid);
2741 flags = __le16_to_cpu(req->flags);
2743 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Socket is returned locked; released at the "unlock" label. */
2745 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2746 if (!sk)
2747 return -ENOENT;
2749 if (sk->sk_state == BT_DISCONN)
2750 goto unlock;
2752 /* Reject if config buffer is too small. */
2753 len = cmd_len - sizeof(*req);
2754 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2755 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2756 l2cap_build_conf_rsp(sk, rsp,
2757 L2CAP_CONF_REJECT, flags), rsp);
2758 goto unlock;
2761 /* Store config. */
2762 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2763 l2cap_pi(sk)->conf_len += len;
2765 if (flags & 0x0001) {
2766 /* Incomplete config. Send empty response. */
2767 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2768 l2cap_build_conf_rsp(sk, rsp,
2769 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2770 goto unlock;
2773 /* Complete config. */
2774 len = l2cap_parse_conf_req(sk, rsp);
2775 if (len < 0) {
2776 l2cap_send_disconn_req(conn, sk);
2777 goto unlock;
2780 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2781 l2cap_pi(sk)->num_conf_rsp++;
2783 /* Reset config buffer. */
2784 l2cap_pi(sk)->conf_len = 0;
2786 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2787 goto unlock;
/* Both directions configured: the channel becomes operational. */
2789 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2790 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2791 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2792 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2794 sk->sk_state = BT_CONNECTED;
2796 l2cap_pi(sk)->next_tx_seq = 0;
2797 l2cap_pi(sk)->expected_tx_seq = 0;
2798 __skb_queue_head_init(TX_QUEUE(sk));
2799 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2800 l2cap_ertm_init(sk);
2802 l2cap_chan_ready(sk);
2803 goto unlock;
/* Peer configured us first; issue our own Configure Request now. */
2806 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2807 u8 buf[64];
2808 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2809 l2cap_build_conf_req(sk, buf), buf);
2810 l2cap_pi(sk)->num_conf_req++;
2813 unlock:
2814 bh_unlock_sock(sk);
2815 return 0;
2818 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2820 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2821 u16 scid, flags, result;
2822 struct sock *sk;
2824 scid = __le16_to_cpu(rsp->scid);
2825 flags = __le16_to_cpu(rsp->flags);
2826 result = __le16_to_cpu(rsp->result);
2828 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2829 scid, flags, result);
2831 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2832 if (!sk)
2833 return 0;
2835 switch (result) {
2836 case L2CAP_CONF_SUCCESS:
2837 break;
2839 case L2CAP_CONF_UNACCEPT:
2840 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2841 int len = cmd->len - sizeof(*rsp);
2842 char req[64];
2844 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2845 l2cap_send_disconn_req(conn, sk);
2846 goto done;
2849 /* throw out any old stored conf requests */
2850 result = L2CAP_CONF_SUCCESS;
2851 len = l2cap_parse_conf_rsp(sk, rsp->data,
2852 len, req, &result);
2853 if (len < 0) {
2854 l2cap_send_disconn_req(conn, sk);
2855 goto done;
2858 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2859 L2CAP_CONF_REQ, len, req);
2860 l2cap_pi(sk)->num_conf_req++;
2861 if (result != L2CAP_CONF_SUCCESS)
2862 goto done;
2863 break;
2866 default:
2867 sk->sk_state = BT_DISCONN;
2868 sk->sk_err = ECONNRESET;
2869 l2cap_sock_set_timer(sk, HZ * 5);
2870 l2cap_send_disconn_req(conn, sk);
2871 goto done;
2874 if (flags & 0x01)
2875 goto done;
2877 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2879 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2880 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2881 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2882 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2884 sk->sk_state = BT_CONNECTED;
2885 l2cap_pi(sk)->next_tx_seq = 0;
2886 l2cap_pi(sk)->expected_tx_seq = 0;
2887 __skb_queue_head_init(TX_QUEUE(sk));
2888 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2889 l2cap_ertm_init(sk);
2891 l2cap_chan_ready(sk);
2894 done:
2895 bh_unlock_sock(sk);
2896 return 0;
/*
 * Handle an incoming Disconnect Request: acknowledge it, flush all
 * pending traffic and ERTM timers, and destroy the channel.
 */
2899 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2901 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2902 struct l2cap_disconn_rsp rsp;
2903 u16 dcid, scid;
2904 struct sock *sk;
2906 scid = __le16_to_cpu(req->scid);
2907 dcid = __le16_to_cpu(req->dcid);
2909 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local CID; socket comes back locked. */
2911 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2912 if (!sk)
2913 return 0;
2915 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2916 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2917 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2919 sk->sk_shutdown = SHUTDOWN_MASK;
2921 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry a SREJ queue and retransmit timers. */
2923 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2924 skb_queue_purge(SREJ_QUEUE(sk));
2925 del_timer(&l2cap_pi(sk)->retrans_timer);
2926 del_timer(&l2cap_pi(sk)->monitor_timer);
2929 l2cap_chan_del(sk, ECONNRESET);
2930 bh_unlock_sock(sk);
2932 l2cap_sock_kill(sk);
2933 return 0;
2936 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2938 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2939 u16 dcid, scid;
2940 struct sock *sk;
2942 scid = __le16_to_cpu(rsp->scid);
2943 dcid = __le16_to_cpu(rsp->dcid);
2945 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2947 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2948 if (!sk)
2949 return 0;
2951 skb_queue_purge(TX_QUEUE(sk));
2953 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2954 skb_queue_purge(SREJ_QUEUE(sk));
2955 del_timer(&l2cap_pi(sk)->retrans_timer);
2956 del_timer(&l2cap_pi(sk)->monitor_timer);
2959 l2cap_chan_del(sk, 0);
2960 bh_unlock_sock(sk);
2962 l2cap_sock_kill(sk);
2963 return 0;
2966 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2968 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2969 u16 type;
2971 type = __le16_to_cpu(req->type);
2973 BT_DBG("type 0x%4.4x", type);
2975 if (type == L2CAP_IT_FEAT_MASK) {
2976 u8 buf[8];
2977 u32 feat_mask = l2cap_feat_mask;
2978 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2979 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2980 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2981 if (enable_ertm)
2982 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2983 | L2CAP_FEAT_FCS;
2984 put_unaligned_le32(feat_mask, rsp->data);
2985 l2cap_send_cmd(conn, cmd->ident,
2986 L2CAP_INFO_RSP, sizeof(buf), buf);
2987 } else if (type == L2CAP_IT_FIXED_CHAN) {
2988 u8 buf[12];
2989 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2990 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2991 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2992 memcpy(buf + 4, l2cap_fixed_chan, 8);
2993 l2cap_send_cmd(conn, cmd->ident,
2994 L2CAP_INFO_RSP, sizeof(buf), buf);
2995 } else {
2996 struct l2cap_info_rsp rsp;
2997 rsp.type = cpu_to_le16(type);
2998 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2999 l2cap_send_cmd(conn, cmd->ident,
3000 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3003 return 0;
/*
 * Handle an incoming Information Response.
 *
 * After the feature mask arrives we optionally chase the fixed-channel
 * list; once the info exchange completes, pending channels are started
 * via l2cap_conn_start().
 */
3006 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3008 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3009 u16 type, result;
3011 type = __le16_to_cpu(rsp->type);
3012 result = __le16_to_cpu(rsp->result);
3014 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* NOTE(review): the timer is stopped without checking that this
 * response matches conn->info_ident — an unsolicited response also
 * cancels it.  Confirm whether that is acceptable here. */
3016 del_timer(&conn->info_timer);
3018 if (type == L2CAP_IT_FEAT_MASK) {
3019 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for the bitmap next. */
3021 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3022 struct l2cap_info_req req;
3023 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3025 conn->info_ident = l2cap_get_ident(conn);
3027 l2cap_send_cmd(conn, conn->info_ident,
3028 L2CAP_INFO_REQ, sizeof(req), &req);
3029 } else {
3030 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3031 conn->info_ident = 0;
3033 l2cap_conn_start(conn);
3035 } else if (type == L2CAP_IT_FIXED_CHAN) {
3036 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3037 conn->info_ident = 0;
3039 l2cap_conn_start(conn);
3042 return 0;
/*
 * Parse and dispatch every signaling command packed into one C-frame.
 *
 * Each command is copied into a local header, bounds-checked against
 * the remaining payload, then routed to its handler.  A handler error
 * produces a Command Reject back to the peer.  Consumes the skb.
 */
3045 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3047 u8 *data = skb->data;
3048 int len = skb->len;
3049 struct l2cap_cmd_hdr cmd;
3050 int err = 0;
/* Give raw sockets a copy of the signaling traffic first. */
3052 l2cap_raw_recv(conn, skb);
3054 while (len >= L2CAP_CMD_HDR_SIZE) {
3055 u16 cmd_len;
3056 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3057 data += L2CAP_CMD_HDR_SIZE;
3058 len -= L2CAP_CMD_HDR_SIZE;
3060 cmd_len = le16_to_cpu(cmd.len);
3062 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more data than remains, or ident 0
 * (reserved), means the frame is corrupt: stop parsing. */
3064 if (cmd_len > len || !cmd.ident) {
3065 BT_DBG("corrupted command");
3066 break;
3069 switch (cmd.code) {
3070 case L2CAP_COMMAND_REJ:
3071 l2cap_command_rej(conn, &cmd, data);
3072 break;
3074 case L2CAP_CONN_REQ:
3075 err = l2cap_connect_req(conn, &cmd, data);
3076 break;
3078 case L2CAP_CONN_RSP:
3079 err = l2cap_connect_rsp(conn, &cmd, data);
3080 break;
3082 case L2CAP_CONF_REQ:
3083 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3084 break;
3086 case L2CAP_CONF_RSP:
3087 err = l2cap_config_rsp(conn, &cmd, data);
3088 break;
3090 case L2CAP_DISCONN_REQ:
3091 err = l2cap_disconnect_req(conn, &cmd, data);
3092 break;
3094 case L2CAP_DISCONN_RSP:
3095 err = l2cap_disconnect_rsp(conn, &cmd, data);
3096 break;
3098 case L2CAP_ECHO_REQ:
/* Echo the payload straight back. */
3099 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3100 break;
3102 case L2CAP_ECHO_RSP:
3103 break;
3105 case L2CAP_INFO_REQ:
3106 err = l2cap_information_req(conn, &cmd, data);
3107 break;
3109 case L2CAP_INFO_RSP:
3110 err = l2cap_information_rsp(conn, &cmd, data);
3111 break;
3113 default:
3114 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3115 err = -EINVAL;
3116 break;
3119 if (err) {
3120 struct l2cap_cmd_rej rej;
3121 BT_DBG("error %d", err);
3123 /* FIXME: Map err to a valid reason */
3124 rej.reason = cpu_to_le16(0);
3125 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Advance to the next command in the frame. */
3128 data += cmd_len;
3129 len -= cmd_len;
3132 kfree_skb(skb);
3135 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3137 u16 our_fcs, rcv_fcs;
3138 int hdr_size = L2CAP_HDR_SIZE + 2;
3140 if (pi->fcs == L2CAP_FCS_CRC16) {
3141 skb_trim(skb, skb->len - 2);
3142 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3143 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3145 if (our_fcs != rcv_fcs)
3146 return -EINVAL;
3148 return 0;
/*
 * Insert an out-of-order I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq (ascending).
 */
3151 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3153 struct sk_buff *next_skb;
/* Stash sequence number and SAR bits in the skb control block for
 * later reassembly. */
3155 bt_cb(skb)->tx_seq = tx_seq;
3156 bt_cb(skb)->sar = sar;
3158 next_skb = skb_peek(SREJ_QUEUE(sk));
3159 if (!next_skb) {
3160 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3161 return;
/* Walk the queue to find the first entry with a larger sequence
 * number and insert in front of it.
 * NOTE(review): a plain ">" compare does not account for the mod-64
 * sequence wraparound, so ordering can be wrong across a wrap —
 * confirm against the ERTM sequence-space rules. */
3164 do {
3165 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3166 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3167 return;
3170 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3171 break;
3173 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3175 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3178 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3180 struct l2cap_pinfo *pi = l2cap_pi(sk);
3181 struct sk_buff *_skb;
3182 int err = -EINVAL;
3184 switch (control & L2CAP_CTRL_SAR) {
3185 case L2CAP_SDU_UNSEGMENTED:
3186 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3187 kfree_skb(pi->sdu);
3188 break;
3191 err = sock_queue_rcv_skb(sk, skb);
3192 if (!err)
3193 return 0;
3195 break;
3197 case L2CAP_SDU_START:
3198 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3199 kfree_skb(pi->sdu);
3200 break;
3203 pi->sdu_len = get_unaligned_le16(skb->data);
3204 skb_pull(skb, 2);
3206 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3207 if (!pi->sdu) {
3208 err = -ENOMEM;
3209 break;
3212 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3214 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3215 pi->partial_sdu_len = skb->len;
3216 err = 0;
3217 break;
3219 case L2CAP_SDU_CONTINUE:
3220 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3221 break;
3223 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3225 pi->partial_sdu_len += skb->len;
3226 if (pi->partial_sdu_len > pi->sdu_len)
3227 kfree_skb(pi->sdu);
3228 else
3229 err = 0;
3231 break;
3233 case L2CAP_SDU_END:
3234 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3235 break;
3237 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3239 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3240 pi->partial_sdu_len += skb->len;
3242 if (pi->partial_sdu_len == pi->sdu_len) {
3243 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3244 err = sock_queue_rcv_skb(sk, _skb);
3245 if (err < 0)
3246 kfree_skb(_skb);
3248 kfree_skb(pi->sdu);
3249 err = 0;
3251 break;
3254 kfree_skb(skb);
3255 return err;
3258 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3260 struct sk_buff *skb;
3261 u16 control = 0;
3263 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3264 if (bt_cb(skb)->tx_seq != tx_seq)
3265 break;
3267 skb = skb_dequeue(SREJ_QUEUE(sk));
3268 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3269 l2cap_sar_reassembly_sdu(sk, skb, control);
3270 l2cap_pi(sk)->buffer_seq_srej =
3271 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3272 tx_seq++;
3276 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3278 struct l2cap_pinfo *pi = l2cap_pi(sk);
3279 struct srej_list *l, *tmp;
3280 u16 control;
3282 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3283 if (l->tx_seq == tx_seq) {
3284 list_del(&l->list);
3285 kfree(l);
3286 return;
3288 control = L2CAP_SUPER_SELECT_REJECT;
3289 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3290 l2cap_send_sframe(pi, control);
3291 list_del(&l->list);
3292 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send a Select Reject for every sequence number between
 * expected_tx_seq and the just-received tx_seq, recording each one in
 * the pending-SREJ list.
 */
3296 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3298 struct l2cap_pinfo *pi = l2cap_pi(sk);
3299 struct srej_list *new;
3300 u16 control;
/* NOTE(review): the loop condition uses plain equality while
 * expected_tx_seq is incremented without a %64 wrap here — confirm
 * the wrap is applied elsewhere before this can run past seq 63. */
3302 while (tx_seq != pi->expected_tx_seq) {
3303 control = L2CAP_SUPER_SELECT_REJECT;
3304 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* The first SREJ of a burst carries the poll bit if one is owed. */
3305 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3306 control |= L2CAP_CTRL_POLL;
3307 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3309 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) is not checked for NULL before
 * the dereference on the next line — allocation failure oopses.
 * Upstream later fixed this by letting this function return an
 * error; that requires a signature change, so only flagging here. */
3311 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3312 new->tx_seq = pi->expected_tx_seq++;
3313 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that just arrived. */
3315 pi->expected_tx_seq++;
/*
 * Process a received ERTM I-frame.
 *
 * In-sequence frames are acknowledged and reassembled; out-of-sequence
 * frames are buffered in the SREJ queue and Select Rejects are issued
 * for the gap.  The piggybacked req_seq acknowledges our transmitted
 * frames in every case.
 */
3318 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3320 struct l2cap_pinfo *pi = l2cap_pi(sk);
3321 u8 tx_seq = __get_txseq(rx_control);
3322 u8 req_seq = __get_reqseq(rx_control);
3323 u16 tx_control = 0;
3324 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3325 int err = 0;
3327 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* The req_seq field acknowledges frames we sent. */
3329 pi->expected_ack_seq = req_seq;
3330 l2cap_drop_acked_frames(sk);
3332 if (tx_seq == pi->expected_tx_seq)
3333 goto expected;
/* Out of sequence while a SREJ recovery is already in progress. */
3335 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3336 struct srej_list *first;
3338 first = list_first_entry(SREJ_LIST(sk),
3339 struct srej_list, list);
3340 if (tx_seq == first->tx_seq) {
/* Oldest missing frame arrived: flush the contiguous run. */
3341 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3342 l2cap_check_srej_gap(sk, tx_seq);
3344 list_del(&first->list);
3345 kfree(first);
3347 if (list_empty(SREJ_LIST(sk))) {
3348 pi->buffer_seq = pi->buffer_seq_srej;
3349 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3351 } else {
3352 struct srej_list *l;
3353 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Frame we already SREJ'ed arrived out of order: resend
 * SREJs for everything still missing before it. */
3355 list_for_each_entry(l, SREJ_LIST(sk), list) {
3356 if (l->tx_seq == tx_seq) {
3357 l2cap_resend_srejframe(sk, tx_seq);
3358 return 0;
3361 l2cap_send_srejframe(sk, tx_seq);
3363 } else {
/* First out-of-sequence frame: start SREJ recovery. */
3364 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3366 INIT_LIST_HEAD(SREJ_LIST(sk));
3367 pi->buffer_seq_srej = pi->buffer_seq;
3369 __skb_queue_head_init(SREJ_QUEUE(sk));
3370 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3372 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3374 l2cap_send_srejframe(sk, tx_seq);
3376 return 0;
3378 expected:
3379 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* During recovery even in-sequence frames go through the SREJ
 * queue so delivery order is preserved. */
3381 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3382 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3383 return 0;
/* F-bit set: retransmit from the acknowledged point unless the
 * REJ exception was already handled. */
3386 if (rx_control & L2CAP_CTRL_FINAL) {
3387 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3388 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3389 else {
3390 sk->sk_send_head = TX_QUEUE(sk)->next;
3391 pi->next_tx_seq = pi->expected_ack_seq;
3392 l2cap_ertm_send(sk);
3396 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3398 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3399 if (err < 0)
3400 return err;
/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK frames with an RR. */
3402 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3403 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3404 tx_control |= L2CAP_SUPER_RCV_READY;
3405 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3406 l2cap_send_sframe(pi, tx_control);
3408 return 0;
/*
 * Process a received ERTM S-frame (RR, REJ, SREJ, RNR).
 *
 * Handles acknowledgement of transmitted frames, poll/final exchange,
 * retransmission requests and remote-busy signaling.  Consumes skb.
 */
3411 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3413 struct l2cap_pinfo *pi = l2cap_pi(sk);
3414 u8 tx_seq = __get_reqseq(rx_control);
3416 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3418 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3419 case L2CAP_SUPER_RCV_READY:
/* Poll: answer immediately with an F-bit RR. */
3420 if (rx_control & L2CAP_CTRL_POLL) {
3421 u16 control = L2CAP_CTRL_FINAL;
3422 control |= L2CAP_SUPER_RCV_READY |
3423 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3424 l2cap_send_sframe(l2cap_pi(sk), control);
3425 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Final: response to our poll — resume transmission. */
3427 } else if (rx_control & L2CAP_CTRL_FINAL) {
3428 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3429 pi->expected_ack_seq = tx_seq;
3430 l2cap_drop_acked_frames(sk);
3432 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3433 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3434 else {
3435 sk->sk_send_head = TX_QUEUE(sk)->next;
3436 pi->next_tx_seq = pi->expected_ack_seq;
3437 l2cap_ertm_send(sk);
3440 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3441 break;
3443 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3444 del_timer(&pi->monitor_timer);
3446 if (pi->unacked_frames > 0)
3447 __mod_retrans_timer();
/* Plain RR: acknowledgement only. */
3448 } else {
3449 pi->expected_ack_seq = tx_seq;
3450 l2cap_drop_acked_frames(sk);
3452 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3453 (pi->unacked_frames > 0))
3454 __mod_retrans_timer();
3456 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3457 l2cap_ertm_send(sk);
3459 break;
3461 case L2CAP_SUPER_REJECT:
/* Peer requests retransmission from req_seq onwards. */
3462 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3464 pi->expected_ack_seq = __get_reqseq(rx_control);
3465 l2cap_drop_acked_frames(sk);
3467 if (rx_control & L2CAP_CTRL_FINAL) {
3468 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3469 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3470 else {
3471 sk->sk_send_head = TX_QUEUE(sk)->next;
3472 pi->next_tx_seq = pi->expected_ack_seq;
3473 l2cap_ertm_send(sk);
3475 } else {
3476 sk->sk_send_head = TX_QUEUE(sk)->next;
3477 pi->next_tx_seq = pi->expected_ack_seq;
3478 l2cap_ertm_send(sk);
3480 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3481 pi->srej_save_reqseq = tx_seq;
3482 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3486 break;
3488 case L2CAP_SUPER_SELECT_REJECT:
/* Peer requests retransmission of a single frame. */
3489 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3491 if (rx_control & L2CAP_CTRL_POLL) {
3492 pi->expected_ack_seq = tx_seq;
3493 l2cap_drop_acked_frames(sk);
3494 l2cap_retransmit_frame(sk, tx_seq);
3495 l2cap_ertm_send(sk);
3496 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3497 pi->srej_save_reqseq = tx_seq;
3498 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* With F-bit: skip the retransmit if this SREJ was already
 * acted upon during the poll exchange. */
3500 } else if (rx_control & L2CAP_CTRL_FINAL) {
3501 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3502 pi->srej_save_reqseq == tx_seq)
3503 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3504 else
3505 l2cap_retransmit_frame(sk, tx_seq);
3507 else {
3508 l2cap_retransmit_frame(sk, tx_seq);
3509 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3510 pi->srej_save_reqseq = tx_seq;
3511 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3514 break;
3516 case L2CAP_SUPER_RCV_NOT_READY:
/* Peer is busy: stop the retransmission timer and, if polled,
 * answer with an F-bit RR/RNR. */
3517 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3518 pi->expected_ack_seq = tx_seq;
3519 l2cap_drop_acked_frames(sk);
3521 del_timer(&l2cap_pi(sk)->retrans_timer);
3522 if (rx_control & L2CAP_CTRL_POLL) {
3523 u16 control = L2CAP_CTRL_FINAL;
3524 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3526 break;
3529 kfree_skb(skb);
3530 return 0;
/*
 * Deliver a data frame to the channel identified by cid, dispatching
 * on the channel mode (Basic, ERTM or Streaming).  Frames for unknown
 * or unconnected channels are dropped.
 */
3533 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3535 struct sock *sk;
3536 struct l2cap_pinfo *pi;
3537 u16 control, len;
3538 u8 tx_seq;
/* Socket is returned locked; unlocked at "done". */
3540 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3541 if (!sk) {
3542 BT_DBG("unknown cid 0x%4.4x", cid);
3543 goto drop;
3546 pi = l2cap_pi(sk);
3548 BT_DBG("sk %p, len %d", sk, skb->len);
3550 if (sk->sk_state != BT_CONNECTED)
3551 goto drop;
3553 switch (pi->mode) {
3554 case L2CAP_MODE_BASIC:
3555 /* If socket recv buffers overflows we drop data here
3556 * which is *bad* because L2CAP has to be reliable.
3557 * But we don't have any other choice. L2CAP doesn't
3558 * provide flow control mechanism. */
3560 if (pi->imtu < skb->len)
3561 goto drop;
3563 if (!sock_queue_rcv_skb(sk, skb))
3564 goto done;
3565 break;
3567 case L2CAP_MODE_ERTM:
/* Strip the 2-byte control field, then compute the payload
 * length net of the SAR length field and FCS, if present. */
3568 control = get_unaligned_le16(skb->data);
3569 skb_pull(skb, 2);
3570 len = skb->len;
3572 if (__is_sar_start(control))
3573 len -= 2;
3575 if (pi->fcs == L2CAP_FCS_CRC16)
3576 len -= 2;
3579 * We can just drop the corrupted I-frame here.
3580 * Receiver will miss it and start proper recovery
3581 * procedures and ask retransmission.
3583 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3584 goto drop;
3586 if (l2cap_check_fcs(pi, skb))
3587 goto drop;
3589 if (__is_iframe(control))
3590 l2cap_data_channel_iframe(sk, control, skb);
3591 else
3592 l2cap_data_channel_sframe(sk, control, skb);
3594 goto done;
3596 case L2CAP_MODE_STREAMING:
3597 control = get_unaligned_le16(skb->data);
3598 skb_pull(skb, 2);
3599 len = skb->len;
3601 if (__is_sar_start(control))
3602 len -= 2;
3604 if (pi->fcs == L2CAP_FCS_CRC16)
3605 len -= 2;
/* Streaming mode carries no S-frames. */
3607 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3608 goto drop;
3610 if (l2cap_check_fcs(pi, skb))
3611 goto drop;
3613 tx_seq = __get_txseq(control);
/* In streaming mode lost frames are simply skipped over. */
3615 if (pi->expected_tx_seq == tx_seq)
3616 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3617 else
3618 pi->expected_tx_seq = (tx_seq + 1) % 64;
3620 l2cap_sar_reassembly_sdu(sk, skb, control);
3622 goto done;
3624 default:
3625 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3626 break;
3629 drop:
3630 kfree_skb(skb);
3632 done:
3633 if (sk)
3634 bh_unlock_sock(sk);
3636 return 0;
3639 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3641 struct sock *sk;
3643 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3644 if (!sk)
3645 goto drop;
3647 BT_DBG("sk %p, len %d", sk, skb->len);
3649 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3650 goto drop;
3652 if (l2cap_pi(sk)->imtu < skb->len)
3653 goto drop;
3655 if (!sock_queue_rcv_skb(sk, skb))
3656 goto done;
3658 drop:
3659 kfree_skb(skb);
3661 done:
3662 if (sk)
3663 bh_unlock_sock(sk);
3664 return 0;
3667 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3669 struct l2cap_hdr *lh = (void *) skb->data;
3670 u16 cid, len;
3671 __le16 psm;
3673 skb_pull(skb, L2CAP_HDR_SIZE);
3674 cid = __le16_to_cpu(lh->cid);
3675 len = __le16_to_cpu(lh->len);
3677 if (len != skb->len) {
3678 kfree_skb(skb);
3679 return;
3682 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3684 switch (cid) {
3685 case L2CAP_CID_SIGNALING:
3686 l2cap_sig_channel(conn, skb);
3687 break;
3689 case L2CAP_CID_CONN_LESS:
3690 psm = get_unaligned_le16(skb->data);
3691 skb_pull(skb, 2);
3692 l2cap_conless_channel(conn, psm, skb);
3693 break;
3695 default:
3696 l2cap_data_channel(conn, cid, skb);
3697 break;
3701 /* ---- L2CAP interface with lower layer (HCI) ---- */
3703 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3705 int exact = 0, lm1 = 0, lm2 = 0;
3706 register struct sock *sk;
3707 struct hlist_node *node;
3709 if (type != ACL_LINK)
3710 return 0;
3712 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3714 /* Find listening sockets and check their link_mode */
3715 read_lock(&l2cap_sk_list.lock);
3716 sk_for_each(sk, node, &l2cap_sk_list.head) {
3717 if (sk->sk_state != BT_LISTEN)
3718 continue;
3720 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3721 lm1 |= HCI_LM_ACCEPT;
3722 if (l2cap_pi(sk)->role_switch)
3723 lm1 |= HCI_LM_MASTER;
3724 exact++;
3725 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3726 lm2 |= HCI_LM_ACCEPT;
3727 if (l2cap_pi(sk)->role_switch)
3728 lm2 |= HCI_LM_MASTER;
3731 read_unlock(&l2cap_sk_list.lock);
3733 return exact ? lm1 : lm2;
3736 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3738 struct l2cap_conn *conn;
3740 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3742 if (hcon->type != ACL_LINK)
3743 return 0;
3745 if (!status) {
3746 conn = l2cap_conn_add(hcon, status);
3747 if (conn)
3748 l2cap_conn_ready(conn);
3749 } else
3750 l2cap_conn_del(hcon, bt_err(status));
3752 return 0;
3755 static int l2cap_disconn_ind(struct hci_conn *hcon)
3757 struct l2cap_conn *conn = hcon->l2cap_data;
3759 BT_DBG("hcon %p", hcon);
3761 if (hcon->type != ACL_LINK || !conn)
3762 return 0x13;
3764 return conn->disc_reason;
3767 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3769 BT_DBG("hcon %p reason %d", hcon, reason);
3771 if (hcon->type != ACL_LINK)
3772 return 0;
3774 l2cap_conn_del(hcon, bt_err(reason));
3776 return 0;
3779 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3781 if (sk->sk_type != SOCK_SEQPACKET)
3782 return;
3784 if (encrypt == 0x00) {
3785 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3786 l2cap_sock_clear_timer(sk);
3787 l2cap_sock_set_timer(sk, HZ * 5);
3788 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3789 __l2cap_sock_close(sk, ECONNREFUSED);
3790 } else {
3791 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3792 l2cap_sock_clear_timer(sk);
/*
 * HCI security (authentication/encryption) result callback.
 *
 * Walks every channel on the connection: established channels get an
 * encryption-state check, outgoing channels waiting on security send
 * their Connect Request (or retry later on failure), and incoming
 * channels in BT_CONNECT2 are answered with success or a
 * security-block rejection.
 */
3796 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3798 struct l2cap_chan_list *l;
3799 struct l2cap_conn *conn = hcon->l2cap_data;
3800 struct sock *sk;
3802 if (!conn)
3803 return 0;
3805 l = &conn->chan_list;
3807 BT_DBG("conn %p", conn);
3809 read_lock(&l->lock);
3811 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3812 bh_lock_sock(sk);
/* Channels still waiting on the connect exchange are skipped. */
3814 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3815 bh_unlock_sock(sk);
3816 continue;
3819 if (!status && (sk->sk_state == BT_CONNECTED ||
3820 sk->sk_state == BT_CONFIG)) {
3821 l2cap_check_encryption(sk, encrypt);
3822 bh_unlock_sock(sk);
3823 continue;
/* Outgoing channel: security done, send the Connect Request. */
3826 if (sk->sk_state == BT_CONNECT) {
3827 if (!status) {
3828 struct l2cap_conn_req req;
3829 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3830 req.psm = l2cap_pi(sk)->psm;
3832 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3834 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3835 L2CAP_CONN_REQ, sizeof(req), &req);
3836 } else {
/* Security failed: arm a short timer to retry/clean up. */
3837 l2cap_sock_clear_timer(sk);
3838 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel awaiting security: answer the peer now. */
3840 } else if (sk->sk_state == BT_CONNECT2) {
3841 struct l2cap_conn_rsp rsp;
3842 __u16 result;
3844 if (!status) {
3845 sk->sk_state = BT_CONFIG;
3846 result = L2CAP_CR_SUCCESS;
3847 } else {
3848 sk->sk_state = BT_DISCONN;
3849 l2cap_sock_set_timer(sk, HZ / 10);
3850 result = L2CAP_CR_SEC_BLOCK;
3853 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3854 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3855 rsp.result = cpu_to_le16(result);
3856 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3857 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3858 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3861 bh_unlock_sock(sk);
3864 read_unlock(&l->lock);
3866 return 0;
/*
 * Receive ACL data from HCI, reassembling L2CAP frames that span
 * several ACL fragments.
 *
 * A start fragment that already contains the whole frame is delivered
 * directly; otherwise an rx_skb sized from the L2CAP header is
 * allocated and continuation fragments are appended until rx_len
 * reaches zero.  Malformed sequences mark the connection unreliable.
 */
3869 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3871 struct l2cap_conn *conn = hcon->l2cap_data;
3873 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3874 goto drop;
3876 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3878 if (flags & ACL_START) {
3879 struct l2cap_hdr *hdr;
3880 int len;
/* A start fragment while a frame is still being reassembled
 * means the previous frame was truncated: discard it. */
3882 if (conn->rx_len) {
3883 BT_ERR("Unexpected start frame (len %d)", skb->len);
3884 kfree_skb(conn->rx_skb);
3885 conn->rx_skb = NULL;
3886 conn->rx_len = 0;
3887 l2cap_conn_unreliable(conn, ECOMM);
/* Need at least the 2-byte length to size the frame. */
3890 if (skb->len < 2) {
3891 BT_ERR("Frame is too short (len %d)", skb->len);
3892 l2cap_conn_unreliable(conn, ECOMM);
3893 goto drop;
3896 hdr = (struct l2cap_hdr *) skb->data;
3897 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3899 if (len == skb->len) {
3900 /* Complete frame received */
3901 l2cap_recv_frame(conn, skb);
3902 return 0;
3905 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3907 if (skb->len > len) {
3908 BT_ERR("Frame is too long (len %d, expected len %d)",
3909 skb->len, len);
3910 l2cap_conn_unreliable(conn, ECOMM);
3911 goto drop;
3914 /* Allocate skb for the complete frame (with header) */
3915 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3916 if (!conn->rx_skb)
3917 goto drop;
3919 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3920 skb->len);
3921 conn->rx_len = len - skb->len;
3922 } else {
3923 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation fragment with no reassembly in progress. */
3925 if (!conn->rx_len) {
3926 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3927 l2cap_conn_unreliable(conn, ECOMM);
3928 goto drop;
3931 if (skb->len > conn->rx_len) {
3932 BT_ERR("Fragment is too long (len %d, expected %d)",
3933 skb->len, conn->rx_len);
3934 kfree_skb(conn->rx_skb);
3935 conn->rx_skb = NULL;
3936 conn->rx_len = 0;
3937 l2cap_conn_unreliable(conn, ECOMM);
3938 goto drop;
3941 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3942 skb->len);
3943 conn->rx_len -= skb->len;
3945 if (!conn->rx_len) {
3946 /* Complete frame received */
3947 l2cap_recv_frame(conn, conn->rx_skb);
3948 conn->rx_skb = NULL;
3952 drop:
3953 kfree_skb(skb);
3954 return 0;
/*
 * debugfs dump: one line per L2CAP socket with addresses, state, PSM,
 * CIDs, MTUs and security level.
 */
3957 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3959 struct sock *sk;
3960 struct hlist_node *node;
3962 read_lock_bh(&l2cap_sk_list.lock);
3964 sk_for_each(sk, node, &l2cap_sk_list.head) {
3965 struct l2cap_pinfo *pi = l2cap_pi(sk);
3967 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3968 batostr(&bt_sk(sk)->src),
3969 batostr(&bt_sk(sk)->dst),
3970 sk->sk_state, __le16_to_cpu(pi->psm),
3971 pi->scid, pi->dcid,
3972 pi->imtu, pi->omtu, pi->sec_level);
3975 read_unlock_bh(&l2cap_sk_list.lock);
3977 return 0;
3980 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3982 return single_open(file, l2cap_debugfs_show, inode->i_private);
3985 static const struct file_operations l2cap_debugfs_fops = {
3986 .open = l2cap_debugfs_open,
3987 .read = seq_read,
3988 .llseek = seq_lseek,
3989 .release = single_release,
/* Debugfs dentry created in l2cap_init() and removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
3994 static const struct proto_ops l2cap_sock_ops = {
3995 .family = PF_BLUETOOTH,
3996 .owner = THIS_MODULE,
3997 .release = l2cap_sock_release,
3998 .bind = l2cap_sock_bind,
3999 .connect = l2cap_sock_connect,
4000 .listen = l2cap_sock_listen,
4001 .accept = l2cap_sock_accept,
4002 .getname = l2cap_sock_getname,
4003 .sendmsg = l2cap_sock_sendmsg,
4004 .recvmsg = l2cap_sock_recvmsg,
4005 .poll = bt_sock_poll,
4006 .ioctl = bt_sock_ioctl,
4007 .mmap = sock_no_mmap,
4008 .socketpair = sock_no_socketpair,
4009 .shutdown = l2cap_sock_shutdown,
4010 .setsockopt = l2cap_sock_setsockopt,
4011 .getsockopt = l2cap_sock_getsockopt
4014 static const struct net_proto_family l2cap_sock_family_ops = {
4015 .family = PF_BLUETOOTH,
4016 .owner = THIS_MODULE,
4017 .create = l2cap_sock_create,
4020 static struct hci_proto l2cap_hci_proto = {
4021 .name = "L2CAP",
4022 .id = HCI_PROTO_L2CAP,
4023 .connect_ind = l2cap_connect_ind,
4024 .connect_cfm = l2cap_connect_cfm,
4025 .disconn_ind = l2cap_disconn_ind,
4026 .disconn_cfm = l2cap_disconn_cfm,
4027 .security_cfm = l2cap_security_cfm,
4028 .recv_acldata = l2cap_recv_acldata
4031 static int __init l2cap_init(void)
4033 int err;
4035 err = proto_register(&l2cap_proto, 0);
4036 if (err < 0)
4037 return err;
4039 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4040 if (err < 0) {
4041 BT_ERR("L2CAP socket registration failed");
4042 goto error;
4045 err = hci_register_proto(&l2cap_hci_proto);
4046 if (err < 0) {
4047 BT_ERR("L2CAP protocol registration failed");
4048 bt_sock_unregister(BTPROTO_L2CAP);
4049 goto error;
4052 if (bt_debugfs) {
4053 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4054 bt_debugfs, NULL, &l2cap_debugfs_fops);
4055 if (!l2cap_debugfs)
4056 BT_ERR("Failed to create L2CAP debug file");
4059 BT_INFO("L2CAP ver %s", VERSION);
4060 BT_INFO("L2CAP socket layer initialized");
4062 return 0;
4064 error:
4065 proto_unregister(&l2cap_proto);
4066 return err;
4069 static void __exit l2cap_exit(void)
4071 debugfs_remove(l2cap_debugfs);
4073 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4074 BT_ERR("L2CAP socket unregistration failed");
4076 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4077 BT_ERR("L2CAP protocol unregistration failed");
4079 proto_unregister(&l2cap_proto);
/* Intentionally empty exported symbol: referencing it from another
 * module triggers automatic loading of the L2CAP module for users that
 * only open L2CAP sockets and need no other symbol from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable parameters, exposed under /sys/module with mode 0644. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

/* Module metadata; the "bt-proto-0" alias lets the Bluetooth core load
 * this module by protocol number (BTPROTO_L2CAP == 0). */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");