Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[linux-2.6/kvm.git] / net / bluetooth / l2cap.c
blob80d929842f04e59e79cadda105532be13752393a
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm = 0;
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* Socket timer expiry handler (runs in timer/softirq context): close the
 * channel with a reason derived from the current socket state, then drop
 * the timer's socket reference. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill the socket only once it is unlocked again. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
120 return s;
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
130 return s;
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s)
141 bh_lock_sock(s);
142 read_unlock(&l->lock);
143 return s;
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 struct sock *s;
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
151 break;
153 return s;
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 struct sock *s;
159 read_lock(&l->lock);
160 s = __l2cap_get_chan_by_ident(l, ident);
161 if (s)
162 bh_lock_sock(s);
163 read_unlock(&l->lock);
164 return s;
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
173 return cid;
176 return 0;
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
181 sock_hold(sk);
183 if (l->head)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
188 l->head = sk;
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
196 if (sk == l->head)
197 l->head = next;
199 if (next)
200 l2cap_pi(next)->prev_c = prev;
201 if (prev)
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
205 __sock_put(sk);
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 } else {
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
236 if (parent)
237 bt_accept_enqueue(parent, sk);
240 /* Delete channel.
241 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 if (conn) {
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
261 if (err)
262 sk->sk_err = err;
264 if (parent) {
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
267 } else
268 sk->sk_state_change(sk);
271 /* Service level security */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
275 __u8 auth_type;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
280 else
281 auth_type = HCI_AT_NO_BONDING;
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 } else {
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 break;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
292 break;
293 default:
294 auth_type = HCI_AT_NO_BONDING;
295 break;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
300 auth_type);
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
305 u8 id;
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
318 id = conn->tx_ident;
320 spin_unlock_bh(&conn->lock);
322 return id;
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
331 if (!skb)
332 return -ENOMEM;
334 return hci_send_acl(conn->hcon, skb, 0);
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339 struct sk_buff *skb;
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
344 if (pi->fcs == L2CAP_FCS_CRC16)
345 hlen += 2;
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
353 if (!skb)
354 return -ENOMEM;
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
361 if (pi->fcs == L2CAP_FCS_CRC16) {
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
373 else
374 control |= L2CAP_SUPER_RCV_READY;
376 return l2cap_send_sframe(pi, control);
379 static void l2cap_do_start(struct sock *sk)
381 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
383 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
384 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
385 return;
387 if (l2cap_check_security(sk)) {
388 struct l2cap_conn_req req;
389 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
390 req.psm = l2cap_pi(sk)->psm;
392 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
394 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
395 L2CAP_CONN_REQ, sizeof(req), &req);
397 } else {
398 struct l2cap_info_req req;
399 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
402 conn->info_ident = l2cap_get_ident(conn);
404 mod_timer(&conn->info_timer, jiffies +
405 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
407 l2cap_send_cmd(conn, conn->info_ident,
408 L2CAP_INFO_REQ, sizeof(req), &req);
412 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
414 struct l2cap_disconn_req req;
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
422 /* ---- L2CAP connections ---- */
423 static void l2cap_conn_start(struct l2cap_conn *conn)
425 struct l2cap_chan_list *l = &conn->chan_list;
426 struct sock *sk;
428 BT_DBG("conn %p", conn);
430 read_lock(&l->lock);
432 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
433 bh_lock_sock(sk);
435 if (sk->sk_type != SOCK_SEQPACKET) {
436 bh_unlock_sock(sk);
437 continue;
440 if (sk->sk_state == BT_CONNECT) {
441 if (l2cap_check_security(sk)) {
442 struct l2cap_conn_req req;
443 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
444 req.psm = l2cap_pi(sk)->psm;
446 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
448 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
449 L2CAP_CONN_REQ, sizeof(req), &req);
451 } else if (sk->sk_state == BT_CONNECT2) {
452 struct l2cap_conn_rsp rsp;
453 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
454 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
456 if (l2cap_check_security(sk)) {
457 if (bt_sk(sk)->defer_setup) {
458 struct sock *parent = bt_sk(sk)->parent;
459 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
460 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
461 parent->sk_data_ready(parent, 0);
463 } else {
464 sk->sk_state = BT_CONFIG;
465 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
466 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
468 } else {
469 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
470 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
473 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
474 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
477 bh_unlock_sock(sk);
480 read_unlock(&l->lock);
483 static void l2cap_conn_ready(struct l2cap_conn *conn)
485 struct l2cap_chan_list *l = &conn->chan_list;
486 struct sock *sk;
488 BT_DBG("conn %p", conn);
490 read_lock(&l->lock);
492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
493 bh_lock_sock(sk);
495 if (sk->sk_type != SOCK_SEQPACKET) {
496 l2cap_sock_clear_timer(sk);
497 sk->sk_state = BT_CONNECTED;
498 sk->sk_state_change(sk);
499 } else if (sk->sk_state == BT_CONNECT)
500 l2cap_do_start(sk);
502 bh_unlock_sock(sk);
505 read_unlock(&l->lock);
508 /* Notify sockets that we cannot guaranty reliability anymore */
509 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
511 struct l2cap_chan_list *l = &conn->chan_list;
512 struct sock *sk;
514 BT_DBG("conn %p", conn);
516 read_lock(&l->lock);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 if (l2cap_pi(sk)->force_reliable)
520 sk->sk_err = err;
523 read_unlock(&l->lock);
526 static void l2cap_info_timeout(unsigned long arg)
528 struct l2cap_conn *conn = (void *) arg;
530 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
531 conn->info_ident = 0;
533 l2cap_conn_start(conn);
536 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
538 struct l2cap_conn *conn = hcon->l2cap_data;
540 if (conn || status)
541 return conn;
543 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
544 if (!conn)
545 return NULL;
547 hcon->l2cap_data = conn;
548 conn->hcon = hcon;
550 BT_DBG("hcon %p conn %p", hcon, conn);
552 conn->mtu = hcon->hdev->acl_mtu;
553 conn->src = &hcon->hdev->bdaddr;
554 conn->dst = &hcon->dst;
556 conn->feat_mask = 0;
558 spin_lock_init(&conn->lock);
559 rwlock_init(&conn->chan_list.lock);
561 setup_timer(&conn->info_timer, l2cap_info_timeout,
562 (unsigned long) conn);
564 conn->disc_reason = 0x13;
566 return conn;
569 static void l2cap_conn_del(struct hci_conn *hcon, int err)
571 struct l2cap_conn *conn = hcon->l2cap_data;
572 struct sock *sk;
574 if (!conn)
575 return;
577 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
579 kfree_skb(conn->rx_skb);
581 /* Kill channels */
582 while ((sk = conn->chan_list.head)) {
583 bh_lock_sock(sk);
584 l2cap_chan_del(sk, err);
585 bh_unlock_sock(sk);
586 l2cap_sock_kill(sk);
589 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
590 del_timer_sync(&conn->info_timer);
592 hcon->l2cap_data = NULL;
593 kfree(conn);
596 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
598 struct l2cap_chan_list *l = &conn->chan_list;
599 write_lock_bh(&l->lock);
600 __l2cap_chan_add(conn, sk, parent);
601 write_unlock_bh(&l->lock);
604 /* ---- Socket interface ---- */
605 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
607 struct sock *sk;
608 struct hlist_node *node;
609 sk_for_each(sk, node, &l2cap_sk_list.head)
610 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
611 goto found;
612 sk = NULL;
613 found:
614 return sk;
617 /* Find socket with psm and source bdaddr.
618 * Returns closest match.
620 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
622 struct sock *sk = NULL, *sk1 = NULL;
623 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head) {
626 if (state && sk->sk_state != state)
627 continue;
629 if (l2cap_pi(sk)->psm == psm) {
630 /* Exact match. */
631 if (!bacmp(&bt_sk(sk)->src, src))
632 break;
634 /* Closest match */
635 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
636 sk1 = sk;
639 return node ? sk : sk1;
642 /* Find socket with given address (psm, src).
643 * Returns locked socket */
644 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
646 struct sock *s;
647 read_lock(&l2cap_sk_list.lock);
648 s = __l2cap_get_sock_by_psm(state, psm, src);
649 if (s)
650 bh_lock_sock(s);
651 read_unlock(&l2cap_sk_list.lock);
652 return s;
655 static void l2cap_sock_destruct(struct sock *sk)
657 BT_DBG("sk %p", sk);
659 skb_queue_purge(&sk->sk_receive_queue);
660 skb_queue_purge(&sk->sk_write_queue);
663 static void l2cap_sock_cleanup_listen(struct sock *parent)
665 struct sock *sk;
667 BT_DBG("parent %p", parent);
669 /* Close not yet accepted channels */
670 while ((sk = bt_accept_dequeue(parent, NULL)))
671 l2cap_sock_close(sk);
673 parent->sk_state = BT_CLOSED;
674 sock_set_flag(parent, SOCK_ZAPPED);
677 /* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
680 static void l2cap_sock_kill(struct sock *sk)
682 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
683 return;
685 BT_DBG("sk %p state %d", sk, sk->sk_state);
687 /* Kill poor orphan */
688 bt_sock_unlink(&l2cap_sk_list, sk);
689 sock_set_flag(sk, SOCK_DEAD);
690 sock_put(sk);
693 static void __l2cap_sock_close(struct sock *sk, int reason)
695 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
697 switch (sk->sk_state) {
698 case BT_LISTEN:
699 l2cap_sock_cleanup_listen(sk);
700 break;
702 case BT_CONNECTED:
703 case BT_CONFIG:
704 if (sk->sk_type == SOCK_SEQPACKET) {
705 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
707 sk->sk_state = BT_DISCONN;
708 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
709 l2cap_send_disconn_req(conn, sk);
710 } else
711 l2cap_chan_del(sk, reason);
712 break;
714 case BT_CONNECT2:
715 if (sk->sk_type == SOCK_SEQPACKET) {
716 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
717 struct l2cap_conn_rsp rsp;
718 __u16 result;
720 if (bt_sk(sk)->defer_setup)
721 result = L2CAP_CR_SEC_BLOCK;
722 else
723 result = L2CAP_CR_BAD_PSM;
725 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
726 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
727 rsp.result = cpu_to_le16(result);
728 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
729 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
730 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
731 } else
732 l2cap_chan_del(sk, reason);
733 break;
735 case BT_CONNECT:
736 case BT_DISCONN:
737 l2cap_chan_del(sk, reason);
738 break;
740 default:
741 sock_set_flag(sk, SOCK_ZAPPED);
742 break;
746 /* Must be called on unlocked socket. */
747 static void l2cap_sock_close(struct sock *sk)
749 l2cap_sock_clear_timer(sk);
750 lock_sock(sk);
751 __l2cap_sock_close(sk, ECONNRESET);
752 release_sock(sk);
753 l2cap_sock_kill(sk);
756 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
758 struct l2cap_pinfo *pi = l2cap_pi(sk);
760 BT_DBG("sk %p", sk);
762 if (parent) {
763 sk->sk_type = parent->sk_type;
764 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
766 pi->imtu = l2cap_pi(parent)->imtu;
767 pi->omtu = l2cap_pi(parent)->omtu;
768 pi->mode = l2cap_pi(parent)->mode;
769 pi->fcs = l2cap_pi(parent)->fcs;
770 pi->sec_level = l2cap_pi(parent)->sec_level;
771 pi->role_switch = l2cap_pi(parent)->role_switch;
772 pi->force_reliable = l2cap_pi(parent)->force_reliable;
773 } else {
774 pi->imtu = L2CAP_DEFAULT_MTU;
775 pi->omtu = 0;
776 pi->mode = L2CAP_MODE_BASIC;
777 pi->fcs = L2CAP_FCS_CRC16;
778 pi->sec_level = BT_SECURITY_LOW;
779 pi->role_switch = 0;
780 pi->force_reliable = 0;
783 /* Default config options */
784 pi->conf_len = 0;
785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
786 skb_queue_head_init(TX_QUEUE(sk));
787 skb_queue_head_init(SREJ_QUEUE(sk));
788 INIT_LIST_HEAD(SREJ_LIST(sk));
791 static struct proto l2cap_proto = {
792 .name = "L2CAP",
793 .owner = THIS_MODULE,
794 .obj_size = sizeof(struct l2cap_pinfo)
797 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
799 struct sock *sk;
801 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
802 if (!sk)
803 return NULL;
805 sock_init_data(sock, sk);
806 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
808 sk->sk_destruct = l2cap_sock_destruct;
809 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
811 sock_reset_flag(sk, SOCK_ZAPPED);
813 sk->sk_protocol = proto;
814 sk->sk_state = BT_OPEN;
816 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
818 bt_sock_link(&l2cap_sk_list, sk);
819 return sk;
822 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
823 int kern)
825 struct sock *sk;
827 BT_DBG("sock %p", sock);
829 sock->state = SS_UNCONNECTED;
831 if (sock->type != SOCK_SEQPACKET &&
832 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
833 return -ESOCKTNOSUPPORT;
835 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
836 return -EPERM;
838 sock->ops = &l2cap_sock_ops;
840 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
841 if (!sk)
842 return -ENOMEM;
844 l2cap_sock_init(sk, NULL);
845 return 0;
848 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
850 struct sock *sk = sock->sk;
851 struct sockaddr_l2 la;
852 int len, err = 0;
854 BT_DBG("sk %p", sk);
856 if (!addr || addr->sa_family != AF_BLUETOOTH)
857 return -EINVAL;
859 memset(&la, 0, sizeof(la));
860 len = min_t(unsigned int, sizeof(la), alen);
861 memcpy(&la, addr, len);
863 if (la.l2_cid)
864 return -EINVAL;
866 lock_sock(sk);
868 if (sk->sk_state != BT_OPEN) {
869 err = -EBADFD;
870 goto done;
873 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
874 !capable(CAP_NET_BIND_SERVICE)) {
875 err = -EACCES;
876 goto done;
879 write_lock_bh(&l2cap_sk_list.lock);
881 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
882 err = -EADDRINUSE;
883 } else {
884 /* Save source address */
885 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
886 l2cap_pi(sk)->psm = la.l2_psm;
887 l2cap_pi(sk)->sport = la.l2_psm;
888 sk->sk_state = BT_BOUND;
890 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
891 __le16_to_cpu(la.l2_psm) == 0x0003)
892 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
895 write_unlock_bh(&l2cap_sk_list.lock);
897 done:
898 release_sock(sk);
899 return err;
902 static int l2cap_do_connect(struct sock *sk)
904 bdaddr_t *src = &bt_sk(sk)->src;
905 bdaddr_t *dst = &bt_sk(sk)->dst;
906 struct l2cap_conn *conn;
907 struct hci_conn *hcon;
908 struct hci_dev *hdev;
909 __u8 auth_type;
910 int err;
912 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
913 l2cap_pi(sk)->psm);
915 hdev = hci_get_route(dst, src);
916 if (!hdev)
917 return -EHOSTUNREACH;
919 hci_dev_lock_bh(hdev);
921 err = -ENOMEM;
923 if (sk->sk_type == SOCK_RAW) {
924 switch (l2cap_pi(sk)->sec_level) {
925 case BT_SECURITY_HIGH:
926 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
927 break;
928 case BT_SECURITY_MEDIUM:
929 auth_type = HCI_AT_DEDICATED_BONDING;
930 break;
931 default:
932 auth_type = HCI_AT_NO_BONDING;
933 break;
935 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
936 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
937 auth_type = HCI_AT_NO_BONDING_MITM;
938 else
939 auth_type = HCI_AT_NO_BONDING;
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
942 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
943 } else {
944 switch (l2cap_pi(sk)->sec_level) {
945 case BT_SECURITY_HIGH:
946 auth_type = HCI_AT_GENERAL_BONDING_MITM;
947 break;
948 case BT_SECURITY_MEDIUM:
949 auth_type = HCI_AT_GENERAL_BONDING;
950 break;
951 default:
952 auth_type = HCI_AT_NO_BONDING;
953 break;
957 hcon = hci_connect(hdev, ACL_LINK, dst,
958 l2cap_pi(sk)->sec_level, auth_type);
959 if (!hcon)
960 goto done;
962 conn = l2cap_conn_add(hcon, 0);
963 if (!conn) {
964 hci_conn_put(hcon);
965 goto done;
968 err = 0;
970 /* Update source addr of the socket */
971 bacpy(src, conn->src);
973 l2cap_chan_add(conn, sk, NULL);
975 sk->sk_state = BT_CONNECT;
976 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
978 if (hcon->state == BT_CONNECTED) {
979 if (sk->sk_type != SOCK_SEQPACKET) {
980 l2cap_sock_clear_timer(sk);
981 sk->sk_state = BT_CONNECTED;
982 } else
983 l2cap_do_start(sk);
986 done:
987 hci_dev_unlock_bh(hdev);
988 hci_dev_put(hdev);
989 return err;
992 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
994 struct sock *sk = sock->sk;
995 struct sockaddr_l2 la;
996 int len, err = 0;
998 BT_DBG("sk %p", sk);
1000 if (!addr || addr->sa_family != AF_BLUETOOTH)
1001 return -EINVAL;
1003 memset(&la, 0, sizeof(la));
1004 len = min_t(unsigned int, sizeof(la), alen);
1005 memcpy(&la, addr, len);
1007 if (la.l2_cid)
1008 return -EINVAL;
1010 lock_sock(sk);
1012 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1013 err = -EINVAL;
1014 goto done;
1017 switch (l2cap_pi(sk)->mode) {
1018 case L2CAP_MODE_BASIC:
1019 break;
1020 case L2CAP_MODE_ERTM:
1021 case L2CAP_MODE_STREAMING:
1022 if (enable_ertm)
1023 break;
1024 /* fall through */
1025 default:
1026 err = -ENOTSUPP;
1027 goto done;
1030 switch (sk->sk_state) {
1031 case BT_CONNECT:
1032 case BT_CONNECT2:
1033 case BT_CONFIG:
1034 /* Already connecting */
1035 goto wait;
1037 case BT_CONNECTED:
1038 /* Already connected */
1039 goto done;
1041 case BT_OPEN:
1042 case BT_BOUND:
1043 /* Can connect */
1044 break;
1046 default:
1047 err = -EBADFD;
1048 goto done;
1051 /* Set destination address and psm */
1052 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1053 l2cap_pi(sk)->psm = la.l2_psm;
1055 err = l2cap_do_connect(sk);
1056 if (err)
1057 goto done;
1059 wait:
1060 err = bt_sock_wait_state(sk, BT_CONNECTED,
1061 sock_sndtimeo(sk, flags & O_NONBLOCK));
1062 done:
1063 release_sock(sk);
1064 return err;
1067 static int l2cap_sock_listen(struct socket *sock, int backlog)
1069 struct sock *sk = sock->sk;
1070 int err = 0;
1072 BT_DBG("sk %p backlog %d", sk, backlog);
1074 lock_sock(sk);
1076 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1077 err = -EBADFD;
1078 goto done;
1081 switch (l2cap_pi(sk)->mode) {
1082 case L2CAP_MODE_BASIC:
1083 break;
1084 case L2CAP_MODE_ERTM:
1085 case L2CAP_MODE_STREAMING:
1086 if (enable_ertm)
1087 break;
1088 /* fall through */
1089 default:
1090 err = -ENOTSUPP;
1091 goto done;
1094 if (!l2cap_pi(sk)->psm) {
1095 bdaddr_t *src = &bt_sk(sk)->src;
1096 u16 psm;
1098 err = -EINVAL;
1100 write_lock_bh(&l2cap_sk_list.lock);
1102 for (psm = 0x1001; psm < 0x1100; psm += 2)
1103 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1104 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1105 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1106 err = 0;
1107 break;
1110 write_unlock_bh(&l2cap_sk_list.lock);
1112 if (err < 0)
1113 goto done;
1116 sk->sk_max_ack_backlog = backlog;
1117 sk->sk_ack_backlog = 0;
1118 sk->sk_state = BT_LISTEN;
1120 done:
1121 release_sock(sk);
1122 return err;
1125 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1127 DECLARE_WAITQUEUE(wait, current);
1128 struct sock *sk = sock->sk, *nsk;
1129 long timeo;
1130 int err = 0;
1132 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1134 if (sk->sk_state != BT_LISTEN) {
1135 err = -EBADFD;
1136 goto done;
1139 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1141 BT_DBG("sk %p timeo %ld", sk, timeo);
1143 /* Wait for an incoming connection. (wake-one). */
1144 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1145 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1146 set_current_state(TASK_INTERRUPTIBLE);
1147 if (!timeo) {
1148 err = -EAGAIN;
1149 break;
1152 release_sock(sk);
1153 timeo = schedule_timeout(timeo);
1154 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1156 if (sk->sk_state != BT_LISTEN) {
1157 err = -EBADFD;
1158 break;
1161 if (signal_pending(current)) {
1162 err = sock_intr_errno(timeo);
1163 break;
1166 set_current_state(TASK_RUNNING);
1167 remove_wait_queue(sk->sk_sleep, &wait);
1169 if (err)
1170 goto done;
1172 newsock->state = SS_CONNECTED;
1174 BT_DBG("new socket %p", nsk);
1176 done:
1177 release_sock(sk);
1178 return err;
1181 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1183 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1184 struct sock *sk = sock->sk;
1186 BT_DBG("sock %p, sk %p", sock, sk);
1188 addr->sa_family = AF_BLUETOOTH;
1189 *len = sizeof(struct sockaddr_l2);
1191 if (peer) {
1192 la->l2_psm = l2cap_pi(sk)->psm;
1193 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1194 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1195 } else {
1196 la->l2_psm = l2cap_pi(sk)->sport;
1197 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1198 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1201 return 0;
1204 static void l2cap_monitor_timeout(unsigned long arg)
1206 struct sock *sk = (void *) arg;
1207 u16 control;
1209 bh_lock_sock(sk);
1210 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1211 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1212 return;
1215 l2cap_pi(sk)->retry_count++;
1216 __mod_monitor_timer();
1218 control = L2CAP_CTRL_POLL;
1219 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1220 bh_unlock_sock(sk);
1223 static void l2cap_retrans_timeout(unsigned long arg)
1225 struct sock *sk = (void *) arg;
1226 u16 control;
1228 bh_lock_sock(sk);
1229 l2cap_pi(sk)->retry_count = 1;
1230 __mod_monitor_timer();
1232 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1234 control = L2CAP_CTRL_POLL;
1235 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1236 bh_unlock_sock(sk);
1239 static void l2cap_drop_acked_frames(struct sock *sk)
1241 struct sk_buff *skb;
1243 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1244 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1245 break;
1247 skb = skb_dequeue(TX_QUEUE(sk));
1248 kfree_skb(skb);
1250 l2cap_pi(sk)->unacked_frames--;
1253 if (!l2cap_pi(sk)->unacked_frames)
1254 del_timer(&l2cap_pi(sk)->retrans_timer);
1256 return;
1259 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1261 struct l2cap_pinfo *pi = l2cap_pi(sk);
1262 int err;
1264 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1266 err = hci_send_acl(pi->conn->hcon, skb, 0);
1267 if (err < 0)
1268 kfree_skb(skb);
1270 return err;
1273 static int l2cap_streaming_send(struct sock *sk)
1275 struct sk_buff *skb, *tx_skb;
1276 struct l2cap_pinfo *pi = l2cap_pi(sk);
1277 u16 control, fcs;
1278 int err;
1280 while ((skb = sk->sk_send_head)) {
1281 tx_skb = skb_clone(skb, GFP_ATOMIC);
1283 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1284 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1285 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1287 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1288 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1289 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1292 err = l2cap_do_send(sk, tx_skb);
1293 if (err < 0) {
1294 l2cap_send_disconn_req(pi->conn, sk);
1295 return err;
1298 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1300 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1301 sk->sk_send_head = NULL;
1302 else
1303 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1305 skb = skb_dequeue(TX_QUEUE(sk));
1306 kfree_skb(skb);
1308 return 0;
1311 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1313 struct l2cap_pinfo *pi = l2cap_pi(sk);
1314 struct sk_buff *skb, *tx_skb;
1315 u16 control, fcs;
1316 int err;
1318 skb = skb_peek(TX_QUEUE(sk));
1319 do {
1320 if (bt_cb(skb)->tx_seq != tx_seq) {
1321 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1322 break;
1323 skb = skb_queue_next(TX_QUEUE(sk), skb);
1324 continue;
1327 if (pi->remote_max_tx &&
1328 bt_cb(skb)->retries == pi->remote_max_tx) {
1329 l2cap_send_disconn_req(pi->conn, sk);
1330 break;
1333 tx_skb = skb_clone(skb, GFP_ATOMIC);
1334 bt_cb(skb)->retries++;
1335 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1336 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1337 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1338 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1340 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1341 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1342 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1345 err = l2cap_do_send(sk, tx_skb);
1346 if (err < 0) {
1347 l2cap_send_disconn_req(pi->conn, sk);
1348 return err;
1350 break;
1351 } while(1);
1352 return 0;
1355 static int l2cap_ertm_send(struct sock *sk)
1357 struct sk_buff *skb, *tx_skb;
1358 struct l2cap_pinfo *pi = l2cap_pi(sk);
1359 u16 control, fcs;
1360 int err;
1362 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1363 return 0;
1365 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
1366 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1367 tx_skb = skb_clone(skb, GFP_ATOMIC);
1369 if (pi->remote_max_tx &&
1370 bt_cb(skb)->retries == pi->remote_max_tx) {
1371 l2cap_send_disconn_req(pi->conn, sk);
1372 break;
1375 bt_cb(skb)->retries++;
1377 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1378 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1379 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1380 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1383 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1384 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1385 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1388 err = l2cap_do_send(sk, tx_skb);
1389 if (err < 0) {
1390 l2cap_send_disconn_req(pi->conn, sk);
1391 return err;
1393 __mod_retrans_timer();
1395 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1396 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1398 pi->unacked_frames++;
1400 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1401 sk->sk_send_head = NULL;
1402 else
1403 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1406 return 0;
1409 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1411 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1412 struct sk_buff **frag;
1413 int err, sent = 0;
1415 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1416 return -EFAULT;
1419 sent += count;
1420 len -= count;
1422 /* Continuation fragments (no L2CAP header) */
1423 frag = &skb_shinfo(skb)->frag_list;
1424 while (len) {
1425 count = min_t(unsigned int, conn->mtu, len);
1427 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1428 if (!*frag)
1429 return -EFAULT;
1430 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1431 return -EFAULT;
1433 sent += count;
1434 len -= count;
1436 frag = &(*frag)->next;
1439 return sent;
1442 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1444 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1445 struct sk_buff *skb;
1446 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1447 struct l2cap_hdr *lh;
1449 BT_DBG("sk %p len %d", sk, (int)len);
1451 count = min_t(unsigned int, (conn->mtu - hlen), len);
1452 skb = bt_skb_send_alloc(sk, count + hlen,
1453 msg->msg_flags & MSG_DONTWAIT, &err);
1454 if (!skb)
1455 return ERR_PTR(-ENOMEM);
1457 /* Create L2CAP header */
1458 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1459 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1460 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1461 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1463 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1464 if (unlikely(err < 0)) {
1465 kfree_skb(skb);
1466 return ERR_PTR(err);
1468 return skb;
1471 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1473 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1474 struct sk_buff *skb;
1475 int err, count, hlen = L2CAP_HDR_SIZE;
1476 struct l2cap_hdr *lh;
1478 BT_DBG("sk %p len %d", sk, (int)len);
1480 count = min_t(unsigned int, (conn->mtu - hlen), len);
1481 skb = bt_skb_send_alloc(sk, count + hlen,
1482 msg->msg_flags & MSG_DONTWAIT, &err);
1483 if (!skb)
1484 return ERR_PTR(-ENOMEM);
1486 /* Create L2CAP header */
1487 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1488 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1489 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1491 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1492 if (unlikely(err < 0)) {
1493 kfree_skb(skb);
1494 return ERR_PTR(err);
1496 return skb;
1499 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1501 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1502 struct sk_buff *skb;
1503 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1504 struct l2cap_hdr *lh;
1506 BT_DBG("sk %p len %d", sk, (int)len);
1508 if (sdulen)
1509 hlen += 2;
1511 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1512 hlen += 2;
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1517 if (!skb)
1518 return ERR_PTR(-ENOMEM);
1520 /* Create L2CAP header */
1521 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1522 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1523 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1524 put_unaligned_le16(control, skb_put(skb, 2));
1525 if (sdulen)
1526 put_unaligned_le16(sdulen, skb_put(skb, 2));
1528 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1529 if (unlikely(err < 0)) {
1530 kfree_skb(skb);
1531 return ERR_PTR(err);
1534 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1535 put_unaligned_le16(0, skb_put(skb, 2));
1537 bt_cb(skb)->retries = 0;
1538 return skb;
1541 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1543 struct l2cap_pinfo *pi = l2cap_pi(sk);
1544 struct sk_buff *skb;
1545 struct sk_buff_head sar_queue;
1546 u16 control;
1547 size_t size = 0;
1549 __skb_queue_head_init(&sar_queue);
1550 control = L2CAP_SDU_START;
1551 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1552 if (IS_ERR(skb))
1553 return PTR_ERR(skb);
1555 __skb_queue_tail(&sar_queue, skb);
1556 len -= pi->max_pdu_size;
1557 size +=pi->max_pdu_size;
1558 control = 0;
1560 while (len > 0) {
1561 size_t buflen;
1563 if (len > pi->max_pdu_size) {
1564 control |= L2CAP_SDU_CONTINUE;
1565 buflen = pi->max_pdu_size;
1566 } else {
1567 control |= L2CAP_SDU_END;
1568 buflen = len;
1571 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1572 if (IS_ERR(skb)) {
1573 skb_queue_purge(&sar_queue);
1574 return PTR_ERR(skb);
1577 __skb_queue_tail(&sar_queue, skb);
1578 len -= buflen;
1579 size += buflen;
1580 control = 0;
1582 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1583 if (sk->sk_send_head == NULL)
1584 sk->sk_send_head = sar_queue.next;
1586 return size;
1589 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1591 struct sock *sk = sock->sk;
1592 struct l2cap_pinfo *pi = l2cap_pi(sk);
1593 struct sk_buff *skb;
1594 u16 control;
1595 int err;
1597 BT_DBG("sock %p, sk %p", sock, sk);
1599 err = sock_error(sk);
1600 if (err)
1601 return err;
1603 if (msg->msg_flags & MSG_OOB)
1604 return -EOPNOTSUPP;
1606 /* Check outgoing MTU */
1607 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1608 && len > pi->omtu)
1609 return -EINVAL;
1611 lock_sock(sk);
1613 if (sk->sk_state != BT_CONNECTED) {
1614 err = -ENOTCONN;
1615 goto done;
1618 /* Connectionless channel */
1619 if (sk->sk_type == SOCK_DGRAM) {
1620 skb = l2cap_create_connless_pdu(sk, msg, len);
1621 err = l2cap_do_send(sk, skb);
1622 goto done;
1625 switch (pi->mode) {
1626 case L2CAP_MODE_BASIC:
1627 /* Create a basic PDU */
1628 skb = l2cap_create_basic_pdu(sk, msg, len);
1629 if (IS_ERR(skb)) {
1630 err = PTR_ERR(skb);
1631 goto done;
1634 err = l2cap_do_send(sk, skb);
1635 if (!err)
1636 err = len;
1637 break;
1639 case L2CAP_MODE_ERTM:
1640 case L2CAP_MODE_STREAMING:
1641 /* Entire SDU fits into one PDU */
1642 if (len <= pi->max_pdu_size) {
1643 control = L2CAP_SDU_UNSEGMENTED;
1644 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1645 if (IS_ERR(skb)) {
1646 err = PTR_ERR(skb);
1647 goto done;
1649 __skb_queue_tail(TX_QUEUE(sk), skb);
1650 if (sk->sk_send_head == NULL)
1651 sk->sk_send_head = skb;
1652 } else {
1653 /* Segment SDU into multiples PDUs */
1654 err = l2cap_sar_segment_sdu(sk, msg, len);
1655 if (err < 0)
1656 goto done;
1659 if (pi->mode == L2CAP_MODE_STREAMING)
1660 err = l2cap_streaming_send(sk);
1661 else
1662 err = l2cap_ertm_send(sk);
1664 if (!err)
1665 err = len;
1666 break;
1668 default:
1669 BT_DBG("bad state %1.1x", pi->mode);
1670 err = -EINVAL;
1673 done:
1674 release_sock(sk);
1675 return err;
1678 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1680 struct sock *sk = sock->sk;
1682 lock_sock(sk);
1684 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1685 struct l2cap_conn_rsp rsp;
1687 sk->sk_state = BT_CONFIG;
1689 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1690 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1691 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1692 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1693 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1694 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1696 release_sock(sk);
1697 return 0;
1700 release_sock(sk);
1702 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1705 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1707 struct sock *sk = sock->sk;
1708 struct l2cap_options opts;
1709 int len, err = 0;
1710 u32 opt;
1712 BT_DBG("sk %p", sk);
1714 lock_sock(sk);
1716 switch (optname) {
1717 case L2CAP_OPTIONS:
1718 opts.imtu = l2cap_pi(sk)->imtu;
1719 opts.omtu = l2cap_pi(sk)->omtu;
1720 opts.flush_to = l2cap_pi(sk)->flush_to;
1721 opts.mode = l2cap_pi(sk)->mode;
1722 opts.fcs = l2cap_pi(sk)->fcs;
1724 len = min_t(unsigned int, sizeof(opts), optlen);
1725 if (copy_from_user((char *) &opts, optval, len)) {
1726 err = -EFAULT;
1727 break;
1730 l2cap_pi(sk)->imtu = opts.imtu;
1731 l2cap_pi(sk)->omtu = opts.omtu;
1732 l2cap_pi(sk)->mode = opts.mode;
1733 l2cap_pi(sk)->fcs = opts.fcs;
1734 break;
1736 case L2CAP_LM:
1737 if (get_user(opt, (u32 __user *) optval)) {
1738 err = -EFAULT;
1739 break;
1742 if (opt & L2CAP_LM_AUTH)
1743 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1744 if (opt & L2CAP_LM_ENCRYPT)
1745 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1746 if (opt & L2CAP_LM_SECURE)
1747 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1749 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1750 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1751 break;
1753 default:
1754 err = -ENOPROTOOPT;
1755 break;
1758 release_sock(sk);
1759 return err;
1762 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1764 struct sock *sk = sock->sk;
1765 struct bt_security sec;
1766 int len, err = 0;
1767 u32 opt;
1769 BT_DBG("sk %p", sk);
1771 if (level == SOL_L2CAP)
1772 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1774 if (level != SOL_BLUETOOTH)
1775 return -ENOPROTOOPT;
1777 lock_sock(sk);
1779 switch (optname) {
1780 case BT_SECURITY:
1781 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1782 err = -EINVAL;
1783 break;
1786 sec.level = BT_SECURITY_LOW;
1788 len = min_t(unsigned int, sizeof(sec), optlen);
1789 if (copy_from_user((char *) &sec, optval, len)) {
1790 err = -EFAULT;
1791 break;
1794 if (sec.level < BT_SECURITY_LOW ||
1795 sec.level > BT_SECURITY_HIGH) {
1796 err = -EINVAL;
1797 break;
1800 l2cap_pi(sk)->sec_level = sec.level;
1801 break;
1803 case BT_DEFER_SETUP:
1804 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1805 err = -EINVAL;
1806 break;
1809 if (get_user(opt, (u32 __user *) optval)) {
1810 err = -EFAULT;
1811 break;
1814 bt_sk(sk)->defer_setup = opt;
1815 break;
1817 default:
1818 err = -ENOPROTOOPT;
1819 break;
1822 release_sock(sk);
1823 return err;
1826 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1828 struct sock *sk = sock->sk;
1829 struct l2cap_options opts;
1830 struct l2cap_conninfo cinfo;
1831 int len, err = 0;
1832 u32 opt;
1834 BT_DBG("sk %p", sk);
1836 if (get_user(len, optlen))
1837 return -EFAULT;
1839 lock_sock(sk);
1841 switch (optname) {
1842 case L2CAP_OPTIONS:
1843 opts.imtu = l2cap_pi(sk)->imtu;
1844 opts.omtu = l2cap_pi(sk)->omtu;
1845 opts.flush_to = l2cap_pi(sk)->flush_to;
1846 opts.mode = l2cap_pi(sk)->mode;
1847 opts.fcs = l2cap_pi(sk)->fcs;
1849 len = min_t(unsigned int, len, sizeof(opts));
1850 if (copy_to_user(optval, (char *) &opts, len))
1851 err = -EFAULT;
1853 break;
1855 case L2CAP_LM:
1856 switch (l2cap_pi(sk)->sec_level) {
1857 case BT_SECURITY_LOW:
1858 opt = L2CAP_LM_AUTH;
1859 break;
1860 case BT_SECURITY_MEDIUM:
1861 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1862 break;
1863 case BT_SECURITY_HIGH:
1864 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1865 L2CAP_LM_SECURE;
1866 break;
1867 default:
1868 opt = 0;
1869 break;
1872 if (l2cap_pi(sk)->role_switch)
1873 opt |= L2CAP_LM_MASTER;
1875 if (l2cap_pi(sk)->force_reliable)
1876 opt |= L2CAP_LM_RELIABLE;
1878 if (put_user(opt, (u32 __user *) optval))
1879 err = -EFAULT;
1880 break;
1882 case L2CAP_CONNINFO:
1883 if (sk->sk_state != BT_CONNECTED &&
1884 !(sk->sk_state == BT_CONNECT2 &&
1885 bt_sk(sk)->defer_setup)) {
1886 err = -ENOTCONN;
1887 break;
1890 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1891 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1893 len = min_t(unsigned int, len, sizeof(cinfo));
1894 if (copy_to_user(optval, (char *) &cinfo, len))
1895 err = -EFAULT;
1897 break;
1899 default:
1900 err = -ENOPROTOOPT;
1901 break;
1904 release_sock(sk);
1905 return err;
1908 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1910 struct sock *sk = sock->sk;
1911 struct bt_security sec;
1912 int len, err = 0;
1914 BT_DBG("sk %p", sk);
1916 if (level == SOL_L2CAP)
1917 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1919 if (level != SOL_BLUETOOTH)
1920 return -ENOPROTOOPT;
1922 if (get_user(len, optlen))
1923 return -EFAULT;
1925 lock_sock(sk);
1927 switch (optname) {
1928 case BT_SECURITY:
1929 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1930 err = -EINVAL;
1931 break;
1934 sec.level = l2cap_pi(sk)->sec_level;
1936 len = min_t(unsigned int, len, sizeof(sec));
1937 if (copy_to_user(optval, (char *) &sec, len))
1938 err = -EFAULT;
1940 break;
1942 case BT_DEFER_SETUP:
1943 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1944 err = -EINVAL;
1945 break;
1948 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1949 err = -EFAULT;
1951 break;
1953 default:
1954 err = -ENOPROTOOPT;
1955 break;
1958 release_sock(sk);
1959 return err;
1962 static int l2cap_sock_shutdown(struct socket *sock, int how)
1964 struct sock *sk = sock->sk;
1965 int err = 0;
1967 BT_DBG("sock %p, sk %p", sock, sk);
1969 if (!sk)
1970 return 0;
1972 lock_sock(sk);
1973 if (!sk->sk_shutdown) {
1974 sk->sk_shutdown = SHUTDOWN_MASK;
1975 l2cap_sock_clear_timer(sk);
1976 __l2cap_sock_close(sk, 0);
1978 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1979 err = bt_sock_wait_state(sk, BT_CLOSED,
1980 sk->sk_lingertime);
1982 release_sock(sk);
1983 return err;
1986 static int l2cap_sock_release(struct socket *sock)
1988 struct sock *sk = sock->sk;
1989 int err;
1991 BT_DBG("sock %p, sk %p", sock, sk);
1993 if (!sk)
1994 return 0;
1996 err = l2cap_sock_shutdown(sock, 2);
1998 sock_orphan(sk);
1999 l2cap_sock_kill(sk);
2000 return err;
2003 static void l2cap_chan_ready(struct sock *sk)
2005 struct sock *parent = bt_sk(sk)->parent;
2007 BT_DBG("sk %p, parent %p", sk, parent);
2009 l2cap_pi(sk)->conf_state = 0;
2010 l2cap_sock_clear_timer(sk);
2012 if (!parent) {
2013 /* Outgoing channel.
2014 * Wake up socket sleeping on connect.
2016 sk->sk_state = BT_CONNECTED;
2017 sk->sk_state_change(sk);
2018 } else {
2019 /* Incoming channel.
2020 * Wake up socket sleeping on accept.
2022 parent->sk_data_ready(parent, 0);
2026 /* Copy frame to all raw sockets on that connection */
2027 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2029 struct l2cap_chan_list *l = &conn->chan_list;
2030 struct sk_buff *nskb;
2031 struct sock *sk;
2033 BT_DBG("conn %p", conn);
2035 read_lock(&l->lock);
2036 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2037 if (sk->sk_type != SOCK_RAW)
2038 continue;
2040 /* Don't send frame to the socket it came from */
2041 if (skb->sk == sk)
2042 continue;
2043 nskb = skb_clone(skb, GFP_ATOMIC);
2044 if (!nskb)
2045 continue;
2047 if (sock_queue_rcv_skb(sk, nskb))
2048 kfree_skb(nskb);
2050 read_unlock(&l->lock);
2053 /* ---- L2CAP signalling commands ---- */
2054 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2055 u8 code, u8 ident, u16 dlen, void *data)
2057 struct sk_buff *skb, **frag;
2058 struct l2cap_cmd_hdr *cmd;
2059 struct l2cap_hdr *lh;
2060 int len, count;
2062 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2063 conn, code, ident, dlen);
2065 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2066 count = min_t(unsigned int, conn->mtu, len);
2068 skb = bt_skb_alloc(count, GFP_ATOMIC);
2069 if (!skb)
2070 return NULL;
2072 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2073 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2074 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2076 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2077 cmd->code = code;
2078 cmd->ident = ident;
2079 cmd->len = cpu_to_le16(dlen);
2081 if (dlen) {
2082 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2083 memcpy(skb_put(skb, count), data, count);
2084 data += count;
2087 len -= skb->len;
2089 /* Continuation fragments (no L2CAP header) */
2090 frag = &skb_shinfo(skb)->frag_list;
2091 while (len) {
2092 count = min_t(unsigned int, conn->mtu, len);
2094 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2095 if (!*frag)
2096 goto fail;
2098 memcpy(skb_put(*frag, count), data, count);
2100 len -= count;
2101 data += count;
2103 frag = &(*frag)->next;
2106 return skb;
2108 fail:
2109 kfree_skb(skb);
2110 return NULL;
2113 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2115 struct l2cap_conf_opt *opt = *ptr;
2116 int len;
2118 len = L2CAP_CONF_OPT_SIZE + opt->len;
2119 *ptr += len;
2121 *type = opt->type;
2122 *olen = opt->len;
2124 switch (opt->len) {
2125 case 1:
2126 *val = *((u8 *) opt->val);
2127 break;
2129 case 2:
2130 *val = __le16_to_cpu(*((__le16 *) opt->val));
2131 break;
2133 case 4:
2134 *val = __le32_to_cpu(*((__le32 *) opt->val));
2135 break;
2137 default:
2138 *val = (unsigned long) opt->val;
2139 break;
2142 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2143 return len;
2146 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2148 struct l2cap_conf_opt *opt = *ptr;
2150 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2152 opt->type = type;
2153 opt->len = len;
2155 switch (len) {
2156 case 1:
2157 *((u8 *) opt->val) = val;
2158 break;
2160 case 2:
2161 *((__le16 *) opt->val) = cpu_to_le16(val);
2162 break;
2164 case 4:
2165 *((__le32 *) opt->val) = cpu_to_le32(val);
2166 break;
2168 default:
2169 memcpy(opt->val, (void *) val, len);
2170 break;
2173 *ptr += L2CAP_CONF_OPT_SIZE + len;
2176 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2178 u32 local_feat_mask = l2cap_feat_mask;
2179 if (enable_ertm)
2180 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2182 switch (mode) {
2183 case L2CAP_MODE_ERTM:
2184 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2185 case L2CAP_MODE_STREAMING:
2186 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2187 default:
2188 return 0x00;
2192 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2194 switch (mode) {
2195 case L2CAP_MODE_STREAMING:
2196 case L2CAP_MODE_ERTM:
2197 if (l2cap_mode_supported(mode, remote_feat_mask))
2198 return mode;
2199 /* fall through */
2200 default:
2201 return L2CAP_MODE_BASIC;
2205 static int l2cap_build_conf_req(struct sock *sk, void *data)
2207 struct l2cap_pinfo *pi = l2cap_pi(sk);
2208 struct l2cap_conf_req *req = data;
2209 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2210 void *ptr = req->data;
2212 BT_DBG("sk %p", sk);
2214 if (pi->num_conf_req || pi->num_conf_rsp)
2215 goto done;
2217 switch (pi->mode) {
2218 case L2CAP_MODE_STREAMING:
2219 case L2CAP_MODE_ERTM:
2220 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2221 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2222 l2cap_send_disconn_req(pi->conn, sk);
2223 break;
2224 default:
2225 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2226 break;
2229 done:
2230 switch (pi->mode) {
2231 case L2CAP_MODE_BASIC:
2232 if (pi->imtu != L2CAP_DEFAULT_MTU)
2233 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2234 break;
2236 case L2CAP_MODE_ERTM:
2237 rfc.mode = L2CAP_MODE_ERTM;
2238 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2239 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
2240 rfc.retrans_timeout = 0;
2241 rfc.monitor_timeout = 0;
2242 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2244 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2245 sizeof(rfc), (unsigned long) &rfc);
2247 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2248 break;
2250 if (pi->fcs == L2CAP_FCS_NONE ||
2251 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2252 pi->fcs = L2CAP_FCS_NONE;
2253 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2255 break;
2257 case L2CAP_MODE_STREAMING:
2258 rfc.mode = L2CAP_MODE_STREAMING;
2259 rfc.txwin_size = 0;
2260 rfc.max_transmit = 0;
2261 rfc.retrans_timeout = 0;
2262 rfc.monitor_timeout = 0;
2263 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2266 sizeof(rfc), (unsigned long) &rfc);
2268 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2269 break;
2271 if (pi->fcs == L2CAP_FCS_NONE ||
2272 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2273 pi->fcs = L2CAP_FCS_NONE;
2274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2276 break;
2279 /* FIXME: Need actual value of the flush timeout */
2280 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2281 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2283 req->dcid = cpu_to_le16(pi->dcid);
2284 req->flags = cpu_to_le16(0);
2286 return ptr - data;
2289 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2291 struct l2cap_pinfo *pi = l2cap_pi(sk);
2292 struct l2cap_conf_rsp *rsp = data;
2293 void *ptr = rsp->data;
2294 void *req = pi->conf_req;
2295 int len = pi->conf_len;
2296 int type, hint, olen;
2297 unsigned long val;
2298 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2299 u16 mtu = L2CAP_DEFAULT_MTU;
2300 u16 result = L2CAP_CONF_SUCCESS;
2302 BT_DBG("sk %p", sk);
2304 while (len >= L2CAP_CONF_OPT_SIZE) {
2305 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2307 hint = type & L2CAP_CONF_HINT;
2308 type &= L2CAP_CONF_MASK;
2310 switch (type) {
2311 case L2CAP_CONF_MTU:
2312 mtu = val;
2313 break;
2315 case L2CAP_CONF_FLUSH_TO:
2316 pi->flush_to = val;
2317 break;
2319 case L2CAP_CONF_QOS:
2320 break;
2322 case L2CAP_CONF_RFC:
2323 if (olen == sizeof(rfc))
2324 memcpy(&rfc, (void *) val, olen);
2325 break;
2327 case L2CAP_CONF_FCS:
2328 if (val == L2CAP_FCS_NONE)
2329 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2331 break;
2333 default:
2334 if (hint)
2335 break;
2337 result = L2CAP_CONF_UNKNOWN;
2338 *((u8 *) ptr++) = type;
2339 break;
2343 if (pi->num_conf_rsp || pi->num_conf_req)
2344 goto done;
2346 switch (pi->mode) {
2347 case L2CAP_MODE_STREAMING:
2348 case L2CAP_MODE_ERTM:
2349 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2350 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2351 return -ECONNREFUSED;
2352 break;
2353 default:
2354 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2355 break;
2358 done:
2359 if (pi->mode != rfc.mode) {
2360 result = L2CAP_CONF_UNACCEPT;
2361 rfc.mode = pi->mode;
2363 if (pi->num_conf_rsp == 1)
2364 return -ECONNREFUSED;
2366 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2367 sizeof(rfc), (unsigned long) &rfc);
2371 if (result == L2CAP_CONF_SUCCESS) {
2372 /* Configure output options and let the other side know
2373 * which ones we don't like. */
2375 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2376 result = L2CAP_CONF_UNACCEPT;
2377 else {
2378 pi->omtu = mtu;
2379 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2383 switch (rfc.mode) {
2384 case L2CAP_MODE_BASIC:
2385 pi->fcs = L2CAP_FCS_NONE;
2386 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2387 break;
2389 case L2CAP_MODE_ERTM:
2390 pi->remote_tx_win = rfc.txwin_size;
2391 pi->remote_max_tx = rfc.max_transmit;
2392 pi->max_pdu_size = rfc.max_pdu_size;
2394 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2395 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2397 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2400 sizeof(rfc), (unsigned long) &rfc);
2402 break;
2404 case L2CAP_MODE_STREAMING:
2405 pi->remote_tx_win = rfc.txwin_size;
2406 pi->max_pdu_size = rfc.max_pdu_size;
2408 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2410 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2411 sizeof(rfc), (unsigned long) &rfc);
2413 break;
2415 default:
2416 result = L2CAP_CONF_UNACCEPT;
2418 memset(&rfc, 0, sizeof(rfc));
2419 rfc.mode = pi->mode;
2422 if (result == L2CAP_CONF_SUCCESS)
2423 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2425 rsp->scid = cpu_to_le16(pi->dcid);
2426 rsp->result = cpu_to_le16(result);
2427 rsp->flags = cpu_to_le16(0x0000);
2429 return ptr - data;
2432 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2434 struct l2cap_pinfo *pi = l2cap_pi(sk);
2435 struct l2cap_conf_req *req = data;
2436 void *ptr = req->data;
2437 int type, olen;
2438 unsigned long val;
2439 struct l2cap_conf_rfc rfc;
2441 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2443 while (len >= L2CAP_CONF_OPT_SIZE) {
2444 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2446 switch (type) {
2447 case L2CAP_CONF_MTU:
2448 if (val < L2CAP_DEFAULT_MIN_MTU) {
2449 *result = L2CAP_CONF_UNACCEPT;
2450 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2451 } else
2452 pi->omtu = val;
2453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2454 break;
2456 case L2CAP_CONF_FLUSH_TO:
2457 pi->flush_to = val;
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2459 2, pi->flush_to);
2460 break;
2462 case L2CAP_CONF_RFC:
2463 if (olen == sizeof(rfc))
2464 memcpy(&rfc, (void *)val, olen);
2466 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2467 rfc.mode != pi->mode)
2468 return -ECONNREFUSED;
2470 pi->mode = rfc.mode;
2471 pi->fcs = 0;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2474 sizeof(rfc), (unsigned long) &rfc);
2475 break;
2479 if (*result == L2CAP_CONF_SUCCESS) {
2480 switch (rfc.mode) {
2481 case L2CAP_MODE_ERTM:
2482 pi->remote_tx_win = rfc.txwin_size;
2483 pi->retrans_timeout = rfc.retrans_timeout;
2484 pi->monitor_timeout = rfc.monitor_timeout;
2485 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2486 break;
2487 case L2CAP_MODE_STREAMING:
2488 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2489 break;
2493 req->dcid = cpu_to_le16(pi->dcid);
2494 req->flags = cpu_to_le16(0x0000);
2496 return ptr - data;
2499 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2501 struct l2cap_conf_rsp *rsp = data;
2502 void *ptr = rsp->data;
2504 BT_DBG("sk %p", sk);
2506 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2507 rsp->result = cpu_to_le16(result);
2508 rsp->flags = cpu_to_le16(flags);
2510 return ptr - data;
2513 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2515 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2517 if (rej->reason != 0x0000)
2518 return 0;
2520 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2521 cmd->ident == conn->info_ident) {
2522 del_timer(&conn->info_timer);
2524 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2525 conn->info_ident = 0;
2527 l2cap_conn_start(conn);
2530 return 0;
/* Handle an incoming L2CAP Connect Request.
 *
 * Looks up a listening socket for the requested PSM, enforces link
 * security (except for SDP, PSM 0x0001), allocates and initializes a
 * child socket, adds it to the connection's channel list, and replies
 * with a Connect Response whose result/status reflect security and
 * defer-setup state. If the feature-mask exchange has not completed
 * yet, the response is PEND and an Information Request is (re)issued.
 *
 * Locking: NOTE(review) — l2cap_get_sock_by_psm() appears to return
 * with the parent socket bh-locked (it is released via
 * bh_unlock_sock(parent) at the `response:` label); confirm against
 * its definition. The chan_list write lock is held across child setup.
 * Code below is unchanged; only comments were added.
 */
2533 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2535 struct l2cap_chan_list *list = &conn->chan_list;
2536 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2537 struct l2cap_conn_rsp rsp;
2538 struct sock *sk, *parent;
2539 int result, status = L2CAP_CS_NO_INFO;
2541 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2542 __le16 psm = req->psm;
2544 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2546 /* Check if we have socket listening on psm */
2547 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2548 if (!parent) {
2549 result = L2CAP_CR_BAD_PSM;
2550 goto sendresp;
2553 /* Check if the ACL is secure enough (if not SDP) */
2554 if (psm != cpu_to_le16(0x0001) &&
2555 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
2556 conn->disc_reason = 0x05;
2557 result = L2CAP_CR_SEC_BLOCK;
2558 goto response;
/* Default result for the remaining failure paths below. */
2561 result = L2CAP_CR_NO_MEM;
2563 /* Check for backlog size */
2564 if (sk_acceptq_is_full(parent)) {
2565 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2566 goto response;
2569 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2570 if (!sk)
2571 goto response;
2573 write_lock_bh(&list->lock);
2575 /* Check if we already have channel with that dcid */
2576 if (__l2cap_get_chan_by_dcid(list, scid)) {
2577 write_unlock_bh(&list->lock);
2578 sock_set_flag(sk, SOCK_ZAPPED);
2579 l2cap_sock_kill(sk);
2580 goto response;
2583 hci_conn_hold(conn->hcon);
2585 l2cap_sock_init(sk, parent);
2586 bacpy(&bt_sk(sk)->src, conn->src);
2587 bacpy(&bt_sk(sk)->dst, conn->dst);
2588 l2cap_pi(sk)->psm = psm;
2589 l2cap_pi(sk)->dcid = scid;
2591 __l2cap_chan_add(conn, sk, parent);
/* Our local CID, allocated by __l2cap_chan_add(), is the peer's dcid. */
2592 dcid = l2cap_pi(sk)->scid;
2594 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2596 l2cap_pi(sk)->ident = cmd->ident;
/* Decide result/status from feature-exchange, security and defer-setup. */
2598 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2599 if (l2cap_check_security(sk)) {
2600 if (bt_sk(sk)->defer_setup) {
2601 sk->sk_state = BT_CONNECT2;
2602 result = L2CAP_CR_PEND;
2603 status = L2CAP_CS_AUTHOR_PEND;
2604 parent->sk_data_ready(parent, 0);
2605 } else {
2606 sk->sk_state = BT_CONFIG;
2607 result = L2CAP_CR_SUCCESS;
2608 status = L2CAP_CS_NO_INFO;
2610 } else {
2611 sk->sk_state = BT_CONNECT2;
2612 result = L2CAP_CR_PEND;
2613 status = L2CAP_CS_AUTHEN_PEND;
2615 } else {
2616 sk->sk_state = BT_CONNECT2;
2617 result = L2CAP_CR_PEND;
2618 status = L2CAP_CS_NO_INFO;
2621 write_unlock_bh(&list->lock);
2623 response:
2624 bh_unlock_sock(parent);
2626 sendresp:
2627 rsp.scid = cpu_to_le16(scid);
2628 rsp.dcid = cpu_to_le16(dcid);
2629 rsp.result = cpu_to_le16(result);
2630 rsp.status = cpu_to_le16(status);
2631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: kick off an Information Request so the
 * pended connection can complete once the exchange finishes. */
2633 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2634 struct l2cap_info_req info;
2635 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2637 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2638 conn->info_ident = l2cap_get_ident(conn);
2640 mod_timer(&conn->info_timer, jiffies +
2641 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2643 l2cap_send_cmd(conn, conn->info_ident,
2644 L2CAP_INFO_REQ, sizeof(info), &info);
2647 return 0;
/* Handle an incoming L2CAP Connect Response.
 *
 * Finds our channel by source CID (or by command ident when the peer
 * sent scid 0, e.g. on a pending/refused response). On SUCCESS the
 * channel moves to BT_CONFIG and we immediately send our Configure
 * Request; on PEND we just flag the pending connect; any other result
 * deletes the channel with ECONNREFUSED.
 *
 * Locking: NOTE(review) — l2cap_get_chan_by_scid()/_by_ident() appear
 * to return with the socket bh-locked (released via bh_unlock_sock()
 * at the end); confirm against their definitions. Code below is
 * unchanged; only comments were added.
 */
2650 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2652 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2653 u16 scid, dcid, result, status;
2654 struct sock *sk;
/* Buffer for the Configure Request built by l2cap_build_conf_req(). */
2655 u8 req[128];
2657 scid = __le16_to_cpu(rsp->scid);
2658 dcid = __le16_to_cpu(rsp->dcid);
2659 result = __le16_to_cpu(rsp->result);
2660 status = __le16_to_cpu(rsp->status);
2662 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2664 if (scid) {
2665 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2666 if (!sk)
2667 return 0;
2668 } else {
2669 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2670 if (!sk)
2671 return 0;
2674 switch (result) {
2675 case L2CAP_CR_SUCCESS:
2676 sk->sk_state = BT_CONFIG;
2677 l2cap_pi(sk)->ident = 0;
2678 l2cap_pi(sk)->dcid = dcid;
2679 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2681 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Connection is up: start configuration right away. */
2683 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2684 l2cap_build_conf_req(sk, req), req);
2685 l2cap_pi(sk)->num_conf_req++;
2686 break;
2688 case L2CAP_CR_PEND:
2689 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2690 break;
2692 default:
/* Any other result (refused, security block, ...) kills the channel. */
2693 l2cap_chan_del(sk, ECONNREFUSED);
2694 break;
2697 bh_unlock_sock(sk);
2698 return 0;
/* Handle an incoming Configuration Request.  Config options may arrive
 * split over several commands (continuation flag 0x0001), so partial
 * data is accumulated in conf_req/conf_len until the final fragment,
 * then parsed and answered in one response. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unparseable options: give up on the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: pick the FCS mode (CRC16
		 * unless both sides agreed on none) and bring the channel
		 * up, initializing ERTM transmit state and timers. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
				|| l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_ack_seq = 0;
		l2cap_pi(sk)->unacked_frames = 0;

		setup_timer(&l2cap_pi(sk)->retrans_timer,
				l2cap_retrans_timeout, (unsigned long) sk);
		setup_timer(&l2cap_pi(sk)->monitor_timer,
				l2cap_monitor_timeout, (unsigned long) sk);

		__skb_queue_head_init(TX_QUEUE(sk));
		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Remote configured us first: send our own request now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2791 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2793 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2794 u16 scid, flags, result;
2795 struct sock *sk;
2797 scid = __le16_to_cpu(rsp->scid);
2798 flags = __le16_to_cpu(rsp->flags);
2799 result = __le16_to_cpu(rsp->result);
2801 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2802 scid, flags, result);
2804 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2805 if (!sk)
2806 return 0;
2808 switch (result) {
2809 case L2CAP_CONF_SUCCESS:
2810 break;
2812 case L2CAP_CONF_UNACCEPT:
2813 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2814 int len = cmd->len - sizeof(*rsp);
2815 char req[64];
2817 /* throw out any old stored conf requests */
2818 result = L2CAP_CONF_SUCCESS;
2819 len = l2cap_parse_conf_rsp(sk, rsp->data,
2820 len, req, &result);
2821 if (len < 0) {
2822 l2cap_send_disconn_req(conn, sk);
2823 goto done;
2826 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2827 L2CAP_CONF_REQ, len, req);
2828 l2cap_pi(sk)->num_conf_req++;
2829 if (result != L2CAP_CONF_SUCCESS)
2830 goto done;
2831 break;
2834 default:
2835 sk->sk_state = BT_DISCONN;
2836 sk->sk_err = ECONNRESET;
2837 l2cap_sock_set_timer(sk, HZ * 5);
2838 l2cap_send_disconn_req(conn, sk);
2839 goto done;
2842 if (flags & 0x01)
2843 goto done;
2845 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2847 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2848 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2849 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2850 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2852 sk->sk_state = BT_CONNECTED;
2853 l2cap_pi(sk)->expected_tx_seq = 0;
2854 l2cap_pi(sk)->buffer_seq = 0;
2855 l2cap_pi(sk)->num_to_ack = 0;
2856 __skb_queue_head_init(TX_QUEUE(sk));
2857 __skb_queue_head_init(SREJ_QUEUE(sk));
2858 l2cap_chan_ready(sk);
2861 done:
2862 bh_unlock_sock(sk);
2863 return 0;
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, flush all pending transmit/SREJ data, stop
 * the ERTM timers and delete the channel. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));
	skb_queue_purge(SREJ_QUEUE(sk));
	del_timer(&l2cap_pi(sk)->retrans_timer);
	del_timer(&l2cap_pi(sk)->monitor_timer);

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	/* Free the socket if nothing else holds it. */
	l2cap_sock_kill(sk);
	return 0;
}
/* Handle a Disconnection Response for a disconnect we initiated:
 * the channel teardown we started can now complete (reason 0 = clean). */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));
	skb_queue_purge(SREJ_QUEUE(sk));
	del_timer(&l2cap_pi(sk)->retrans_timer);
	del_timer(&l2cap_pi(sk)->monitor_timer);

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an Information Request: report our feature mask (including
 * ERTM/streaming/FCS when enable_ertm is set) or fixed-channel map;
 * anything else gets a NOTSUPP response. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an Information Response.  A feature-mask answer may trigger a
 * follow-up fixed-channel query; once the exchange is finished the
 * deferred channel setup is kicked via l2cap_conn_start(). */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* Response arrived in time: stop the info timeout. */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Demultiplex the signaling channel (CID 0x0001): iterate over the
 * concatenated commands in the skb, dispatch each to its handler, and
 * send a Command Reject for any handler error.  Consumes the skb. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3096 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3098 u16 our_fcs, rcv_fcs;
3099 int hdr_size = L2CAP_HDR_SIZE + 2;
3101 if (pi->fcs == L2CAP_FCS_CRC16) {
3102 skb_trim(skb, skb->len - 2);
3103 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3104 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3106 if (our_fcs != rcv_fcs)
3107 return -EINVAL;
3109 return 0;
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq so reassembly can later drain it in order. */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequence number and SAR bits in the skb control block. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	/* Walk the queue until we find the first frame with a larger
	 * sequence number and insert in front of it. */
	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
/* Reassemble an SDU from SAR-segmented frames and deliver it to the
 * socket.  Returns 0 on success/accepted fragment, negative on error.
 * The input skb is always consumed (freed at the end). */
static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* NOTE(review): on this path pi->sdu is freed but
		 * L2CAP_CONN_SAR_SDU is left set and pi->sdu left stale —
		 * a later START frame will kfree it again; verify. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload octets carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		/* Fragment overruns the announced SDU length: abort. */
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len == pi->sdu_len) {
			/* NOTE(review): skb_clone() may return NULL under
			 * GFP_ATOMIC and is passed to sock_queue_rcv_skb()
			 * unchecked — confirm against upstream fix. */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		kfree_skb(pi->sdu);
		err = 0;
		break;
	}

	kfree_skb(skb);
	return err;
}
/* After a missing frame arrives, drain the SREJ queue of every frame
 * that is now in sequence starting at tx_seq, delivering each through
 * SAR reassembly and advancing the shadow buffer sequence counter. */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control = 0;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Queue is sorted; stop at the first remaining gap. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_sar_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
/* The frame tx_seq has finally arrived: drop its entry from the SREJ
 * list, re-sending an SREJ S-frame for every still-missing sequence
 * number ahead of it (entries are rotated to the list tail). */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request: remove and stop. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
/* Send an SREJ S-frame for every sequence number between the expected
 * one and the frame that actually arrived (tx_seq), recording each
 * outstanding request in the SREJ list. */
static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *new;
	u16 control;

	while (tx_seq != pi->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		/* First SREJ of a recovery round carries the Poll bit. */
		if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
			control |= L2CAP_CTRL_POLL;
			pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
		}
		l2cap_send_sframe(pi, control);

		/* NOTE(review): kzalloc(GFP_ATOMIC) result is not checked;
		 * an allocation failure here dereferences NULL.  Also
		 * expected_tx_seq is incremented without the % 64 wrap used
		 * elsewhere — confirm against upstream fixes. */
		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = pi->expected_tx_seq++;
		list_add_tail(&new->list, SREJ_LIST(sk));
	}
	pi->expected_tx_seq++;
}
/* Process a received ERTM I-frame.  In-sequence frames go straight to
 * SAR reassembly; out-of-sequence frames are parked in the SREJ queue
 * and selective-reject recovery is driven from here. */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Recovery already in progress. */
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: queue it and see
			 * how far the gap can now be closed. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* All gaps filled: leave recovery mode. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* Duplicate of an already-requested frame? Then
			 * re-issue the outstanding SREJs instead. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Keep ordering via the SREJ queue while recovering. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK-th frame with RR. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
3358 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3360 struct l2cap_pinfo *pi = l2cap_pi(sk);
3361 u8 tx_seq = __get_reqseq(rx_control);
3363 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3365 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3366 case L2CAP_SUPER_RCV_READY:
3367 if (rx_control & L2CAP_CTRL_POLL) {
3368 u16 control = L2CAP_CTRL_FINAL;
3369 control |= L2CAP_SUPER_RCV_READY |
3370 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3371 l2cap_send_sframe(l2cap_pi(sk), control);
3372 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3374 } else if (rx_control & L2CAP_CTRL_FINAL) {
3375 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3376 pi->expected_ack_seq = tx_seq;
3377 l2cap_drop_acked_frames(sk);
3379 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3380 break;
3382 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3383 del_timer(&pi->monitor_timer);
3385 if (pi->unacked_frames > 0)
3386 __mod_retrans_timer();
3387 } else {
3388 pi->expected_ack_seq = tx_seq;
3389 l2cap_drop_acked_frames(sk);
3391 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3392 && (pi->unacked_frames > 0))
3393 __mod_retrans_timer();
3395 l2cap_ertm_send(sk);
3396 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3398 break;
3400 case L2CAP_SUPER_REJECT:
3401 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3403 pi->expected_ack_seq = __get_reqseq(rx_control);
3404 l2cap_drop_acked_frames(sk);
3406 sk->sk_send_head = TX_QUEUE(sk)->next;
3407 pi->next_tx_seq = pi->expected_ack_seq;
3409 l2cap_ertm_send(sk);
3411 break;
3413 case L2CAP_SUPER_SELECT_REJECT:
3414 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3416 if (rx_control & L2CAP_CTRL_POLL) {
3417 l2cap_retransmit_frame(sk, tx_seq);
3418 pi->expected_ack_seq = tx_seq;
3419 l2cap_drop_acked_frames(sk);
3420 l2cap_ertm_send(sk);
3421 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3422 pi->srej_save_reqseq = tx_seq;
3423 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3425 } else if (rx_control & L2CAP_CTRL_FINAL) {
3426 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3427 pi->srej_save_reqseq == tx_seq)
3428 pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT;
3429 else
3430 l2cap_retransmit_frame(sk, tx_seq);
3432 else {
3433 l2cap_retransmit_frame(sk, tx_seq);
3434 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3435 pi->srej_save_reqseq = tx_seq;
3436 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3439 break;
3441 case L2CAP_SUPER_RCV_NOT_READY:
3442 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3443 pi->expected_ack_seq = tx_seq;
3444 l2cap_drop_acked_frames(sk);
3446 del_timer(&l2cap_pi(sk)->retrans_timer);
3447 if (rx_control & L2CAP_CTRL_POLL) {
3448 u16 control = L2CAP_CTRL_FINAL;
3449 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3451 break;
3454 return 0;
/* Entry point for data on a connection-oriented channel: dispatch the
 * frame according to the channel mode (basic / ERTM / streaming).
 * The skb is either queued to the socket or freed. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* Payload length excludes the SAR SDU-length field and
		 * the trailing FCS, when present. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries no S-frames. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: just resync the
		 * expected sequence number on a gap. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM on the local address, dropping it if there is no
 * suitable socket, the state is wrong, or the frame exceeds the MTU. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Route one complete L2CAP frame to the signaling channel, the
 * connectionless channel, or a data channel based on its CID.
 * Frames whose header length disagrees with the skb are discarded. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* PSM prefixes the payload on the connectionless channel. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3628 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI asks whether to accept an incoming ACL connection.  Returns the
 * combined link-mode flags of matching listening sockets, preferring
 * sockets bound to this adapter's address over wildcard ones. */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Exact match on the local adapter address. */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3663 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3665 struct l2cap_conn *conn;
3667 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3669 if (hcon->type != ACL_LINK)
3670 return 0;
3672 if (!status) {
3673 conn = l2cap_conn_add(hcon, status);
3674 if (conn)
3675 l2cap_conn_ready(conn);
3676 } else
3677 l2cap_conn_del(hcon, bt_err(status));
3679 return 0;
/* HCI asks which reason code to use when disconnecting this link.
 * 0x13 is "remote user terminated connection" — used as the default
 * when we have no L2CAP state for the link. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
3694 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3696 BT_DBG("hcon %p reason %d", hcon, reason);
3698 if (hcon->type != ACL_LINK)
3699 return 0;
3701 l2cap_conn_del(hcon, bt_err(reason));
3703 return 0;
/* React to an encryption change on an established SEQPACKET channel:
 * losing encryption on a MEDIUM-security channel starts a grace timer,
 * on a HIGH-security channel it closes the socket immediately;
 * regaining encryption cancels the MEDIUM grace timer. */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	if (sk->sk_type != SOCK_SEQPACKET)
		return;

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
	} else {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
	}
}
/* HCI reports the result of an authentication/encryption request.
 * Walk every channel on the connection and advance its state machine:
 * established channels re-check encryption, outgoing channels send the
 * deferred Connection Request, incoming ones answer the peer. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting for the connect exchange are
		 * handled by the connect path, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is in place: send the pending
				 * Connection Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel held for security: tell the peer
			 * whether it may proceed. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* Receive ACL data from HCI and reassemble fragmented L2CAP frames.
 * ACL_START fragments carry the L2CAP header (giving the total frame
 * length); continuation fragments are appended to conn->rx_skb until
 * conn->rx_len reaches zero, then the full frame is dispatched. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* A previous frame was left unfinished: discard it
			 * and mark the connection unreliable. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
/* Dump one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) into the sysfs class attribute buffer. */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		/* NOTE(review): unbounded sprintf into the sysfs buffer
		 * can overrun PAGE_SIZE with many sockets — consider
		 * snprintf or a seq_file/debugfs interface. */
		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return str - buf;
}

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Hook for socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP) creation. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Callbacks registered with the HCI core for the L2CAP protocol. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks, unwinding registrations on failure. */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* Non-fatal: L2CAP works without the sysfs info file. */
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
/* Module exit: undo everything l2cap_init() registered. */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/* Dummy function to trigger automatic L2CAP module loading by other
 * modules that use L2CAP sockets but don't use any other symbols
 * from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime knob for Enhanced Retransmission Mode support (0644: visible
 * and root-writable under /sys/module). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");