net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.11"
55 static u32 l2cap_feat_mask = 0x0000;
57 static const struct proto_ops l2cap_sock_ops;
59 static struct bt_sock_list l2cap_sk_list = {
60 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
63 static void __l2cap_sock_close(struct sock *sk, int reason);
64 static void l2cap_sock_close(struct sock *sk);
65 static void l2cap_sock_kill(struct sock *sk);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
70 /* ---- L2CAP timers ---- */
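/* Socket timer expired. A connect still waiting on authentication or
 * encryption is failed with ECONNREFUSED, anything else with ETIMEDOUT;
 * the channel is then closed and the orphaned socket killed. */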
71 static void l2cap_sock_timeout(unsigned long arg)
73 struct sock *sk = (struct sock *) arg;
74 int reason;
76 BT_DBG("sock %p state %d", sk, sk->sk_state);
78 bh_lock_sock(sk);
80 if (sk->sk_state == BT_CONNECT &&
81 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
82 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
83 reason = ECONNREFUSED;
84 else
85 reason = ETIMEDOUT;
87 __l2cap_sock_close(sk, reason);
89 bh_unlock_sock(sk);
91 l2cap_sock_kill(sk);
92 sock_put(sk);
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 /* ---- L2CAP channels ---- */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
110 struct sock *s;
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
113 break;
115 return s;
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120 struct sock *s;
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
123 break;
125 return s;
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 struct sock *s;
133 read_lock(&l->lock);
134 s = __l2cap_get_chan_by_scid(l, cid);
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
137 return s;
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
142 struct sock *s;
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
145 break;
147 return s;
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 struct sock *s;
153 read_lock(&l->lock);
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
157 return s;
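/* Allocate the first free source CID in the dynamic range (0x0040 and up);
 * returns 0 if the whole range is in use. */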
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
162 u16 cid = 0x0040;
164 for (; cid < 0xffff; cid++) {
165 if (!__l2cap_get_chan_by_scid(l, cid))
166 return cid;
169 return 0;
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
174 sock_hold(sk);
176 if (l->head)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
181 l->head = sk;
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
189 if (sk == l->head)
190 l->head = next;
192 if (next)
193 l2cap_pi(next)->prev_c = prev;
194 if (prev)
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
198 __sock_put(sk);
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
217 } else {
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
226 if (parent)
227 bt_accept_enqueue(parent, sk);
230 /* Delete channel.
231 * Must be called on the locked socket. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
241 if (conn) {
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
251 if (err)
252 sk->sk_err = err;
254 if (parent) {
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
257 } else
258 sk->sk_state_change(sk);
261 /* Service level security */
262 static inline int l2cap_check_link_mode(struct sock *sk)
264 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
266 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
267 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
268 return hci_conn_encrypt(conn->hcon);
270 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
271 return hci_conn_auth(conn->hcon);
273 return 1;
276 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
278 u8 id;
280 /* Get next available identifier.
281 * 1 - 128 are used by kernel.
282 * 129 - 199 are reserved.
283 * 200 - 254 are used by utilities like l2ping, etc. */
286 spin_lock_bh(&conn->lock);
288 if (++conn->tx_ident > 128)
289 conn->tx_ident = 1;
291 id = conn->tx_ident;
293 spin_unlock_bh(&conn->lock);
295 return id;
298 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
300 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
302 BT_DBG("code 0x%2.2x", code);
304 if (!skb)
305 return -ENOMEM;
307 return hci_send_acl(conn->hcon, skb, 0);
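/* Start channel setup on an existing connection: once the remote feature
 * mask has been requested, send the Connection Request as soon as the
 * socket's security requirements are met; otherwise query the feature mask
 * first and resume setup when the answer (or the info timeout) arrives. */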
310 static void l2cap_do_start(struct sock *sk)
312 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
314 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
315 if (l2cap_check_link_mode(sk)) {
316 struct l2cap_conn_req req;
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
320 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
323 L2CAP_CONN_REQ, sizeof(req), &req);
325 } else {
326 struct l2cap_info_req req;
327 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
329 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
330 conn->info_ident = l2cap_get_ident(conn);
332 mod_timer(&conn->info_timer, jiffies +
333 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
335 l2cap_send_cmd(conn, conn->info_ident,
336 L2CAP_INFO_REQ, sizeof(req), &req);
340 /* ---- L2CAP connections ---- */
341 static void l2cap_conn_start(struct l2cap_conn *conn)
343 struct l2cap_chan_list *l = &conn->chan_list;
344 struct sock *sk;
346 BT_DBG("conn %p", conn);
348 read_lock(&l->lock);
350 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
351 bh_lock_sock(sk);
353 if (sk->sk_type != SOCK_SEQPACKET) {
354 bh_unlock_sock(sk);
355 continue;
358 if (sk->sk_state == BT_CONNECT) {
359 if (l2cap_check_link_mode(sk)) {
360 struct l2cap_conn_req req;
361 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
362 req.psm = l2cap_pi(sk)->psm;
364 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
366 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
367 L2CAP_CONN_REQ, sizeof(req), &req);
369 } else if (sk->sk_state == BT_CONNECT2) {
370 struct l2cap_conn_rsp rsp;
371 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
372 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
374 if (l2cap_check_link_mode(sk)) {
375 sk->sk_state = BT_CONFIG;
376 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
377 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
378 } else {
379 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
380 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
383 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
384 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
387 bh_unlock_sock(sk);
390 read_unlock(&l->lock);
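/* The ACL link came up: raw and connectionless sockets are marked connected
 * immediately, SEQPACKET channels still in BT_CONNECT continue with
 * l2cap_do_start(). */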
393 static void l2cap_conn_ready(struct l2cap_conn *conn)
395 struct l2cap_chan_list *l = &conn->chan_list;
396 struct sock *sk;
398 BT_DBG("conn %p", conn);
400 read_lock(&l->lock);
402 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
403 bh_lock_sock(sk);
405 if (sk->sk_type != SOCK_SEQPACKET) {
406 l2cap_sock_clear_timer(sk);
407 sk->sk_state = BT_CONNECTED;
408 sk->sk_state_change(sk);
409 } else if (sk->sk_state == BT_CONNECT)
410 l2cap_do_start(sk);
412 bh_unlock_sock(sk);
415 read_unlock(&l->lock);
418 /* Notify sockets that we cannot guarantee reliability anymore */
419 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
421 struct l2cap_chan_list *l = &conn->chan_list;
422 struct sock *sk;
424 BT_DBG("conn %p", conn);
426 read_lock(&l->lock);
428 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
429 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
430 sk->sk_err = err;
433 read_unlock(&l->lock);
436 static void l2cap_info_timeout(unsigned long arg)
438 struct l2cap_conn *conn = (void *) arg;
440 conn->info_ident = 0;
442 l2cap_conn_start(conn);
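/* Create (or return the existing) L2CAP connection object for an ACL link
 * and initialise its MTU, addresses, channel list and info timer. */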
445 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
447 struct l2cap_conn *conn = hcon->l2cap_data;
449 if (conn || status)
450 return conn;
452 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
453 if (!conn)
454 return NULL;
456 hcon->l2cap_data = conn;
457 conn->hcon = hcon;
459 BT_DBG("hcon %p conn %p", hcon, conn);
461 conn->mtu = hcon->hdev->acl_mtu;
462 conn->src = &hcon->hdev->bdaddr;
463 conn->dst = &hcon->dst;
465 conn->feat_mask = 0;
467 setup_timer(&conn->info_timer, l2cap_info_timeout,
468 (unsigned long) conn);
470 spin_lock_init(&conn->lock);
471 rwlock_init(&conn->chan_list.lock);
473 return conn;
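/* Tear down an L2CAP connection: drop any partially reassembled frame,
 * delete and kill every channel with the given error, stop the info timer
 * and free the connection object. */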
476 static void l2cap_conn_del(struct hci_conn *hcon, int err)
478 struct l2cap_conn *conn = hcon->l2cap_data;
479 struct sock *sk;
481 if (!conn)
482 return;
484 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
486 if (conn->rx_skb)
487 kfree_skb(conn->rx_skb);
489 /* Kill channels */
490 while ((sk = conn->chan_list.head)) {
491 bh_lock_sock(sk);
492 l2cap_chan_del(sk, err);
493 bh_unlock_sock(sk);
494 l2cap_sock_kill(sk);
497 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
498 del_timer_sync(&conn->info_timer);
500 hcon->l2cap_data = NULL;
501 kfree(conn);
504 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
506 struct l2cap_chan_list *l = &conn->chan_list;
507 write_lock_bh(&l->lock);
508 __l2cap_chan_add(conn, sk, parent);
509 write_unlock_bh(&l->lock);
512 /* ---- Socket interface ---- */
513 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
515 struct sock *sk;
516 struct hlist_node *node;
517 sk_for_each(sk, node, &l2cap_sk_list.head)
518 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
519 goto found;
520 sk = NULL;
521 found:
522 return sk;
525 /* Find socket with psm and source bdaddr.
526 * Returns closest match. */
528 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
530 struct sock *sk = NULL, *sk1 = NULL;
531 struct hlist_node *node;
533 sk_for_each(sk, node, &l2cap_sk_list.head) {
534 if (state && sk->sk_state != state)
535 continue;
537 if (l2cap_pi(sk)->psm == psm) {
538 /* Exact match. */
539 if (!bacmp(&bt_sk(sk)->src, src))
540 break;
542 /* Closest match */
543 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
544 sk1 = sk;
547 return node ? sk : sk1;
550 /* Find socket with given address (psm, src).
551 * Returns locked socket */
552 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
554 struct sock *s;
555 read_lock(&l2cap_sk_list.lock);
556 s = __l2cap_get_sock_by_psm(state, psm, src);
557 if (s) bh_lock_sock(s);
558 read_unlock(&l2cap_sk_list.lock);
559 return s;
562 static void l2cap_sock_destruct(struct sock *sk)
564 BT_DBG("sk %p", sk);
566 skb_queue_purge(&sk->sk_receive_queue);
567 skb_queue_purge(&sk->sk_write_queue);
570 static void l2cap_sock_cleanup_listen(struct sock *parent)
572 struct sock *sk;
574 BT_DBG("parent %p", parent);
576 /* Close not yet accepted channels */
577 while ((sk = bt_accept_dequeue(parent, NULL)))
578 l2cap_sock_close(sk);
580 parent->sk_state = BT_CLOSED;
581 sock_set_flag(parent, SOCK_ZAPPED);
584 /* Kill socket (only if zapped and orphan)
585 * Must be called on unlocked socket. */
587 static void l2cap_sock_kill(struct sock *sk)
589 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
590 return;
592 BT_DBG("sk %p state %d", sk, sk->sk_state);
594 /* Kill poor orphan */
595 bt_sock_unlink(&l2cap_sk_list, sk);
596 sock_set_flag(sk, SOCK_DEAD);
597 sock_put(sk);
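/* Close a channel according to its state: clean up a listening socket,
 * send a Disconnection Request for established SEQPACKET channels (moving
 * them to BT_DISCONN), or simply delete the channel. Called with the
 * socket locked. */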
600 static void __l2cap_sock_close(struct sock *sk, int reason)
602 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
604 switch (sk->sk_state) {
605 case BT_LISTEN:
606 l2cap_sock_cleanup_listen(sk);
607 break;
609 case BT_CONNECTED:
610 case BT_CONFIG:
611 case BT_CONNECT2:
612 if (sk->sk_type == SOCK_SEQPACKET) {
613 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
614 struct l2cap_disconn_req req;
616 sk->sk_state = BT_DISCONN;
617 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
619 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
620 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
621 l2cap_send_cmd(conn, l2cap_get_ident(conn),
622 L2CAP_DISCONN_REQ, sizeof(req), &req);
623 } else
624 l2cap_chan_del(sk, reason);
625 break;
627 case BT_CONNECT:
628 case BT_DISCONN:
629 l2cap_chan_del(sk, reason);
630 break;
632 default:
633 sock_set_flag(sk, SOCK_ZAPPED);
634 break;
638 /* Must be called on unlocked socket. */
639 static void l2cap_sock_close(struct sock *sk)
641 l2cap_sock_clear_timer(sk);
642 lock_sock(sk);
643 __l2cap_sock_close(sk, ECONNRESET);
644 release_sock(sk);
645 l2cap_sock_kill(sk);
648 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
650 struct l2cap_pinfo *pi = l2cap_pi(sk);
652 BT_DBG("sk %p", sk);
654 if (parent) {
655 sk->sk_type = parent->sk_type;
656 pi->imtu = l2cap_pi(parent)->imtu;
657 pi->omtu = l2cap_pi(parent)->omtu;
658 pi->link_mode = l2cap_pi(parent)->link_mode;
659 } else {
660 pi->imtu = L2CAP_DEFAULT_MTU;
661 pi->omtu = 0;
662 pi->link_mode = 0;
665 /* Default config options */
666 pi->conf_len = 0;
667 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
670 static struct proto l2cap_proto = {
671 .name = "L2CAP",
672 .owner = THIS_MODULE,
673 .obj_size = sizeof(struct l2cap_pinfo)
676 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
678 struct sock *sk;
680 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
681 if (!sk)
682 return NULL;
684 sock_init_data(sock, sk);
685 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
687 sk->sk_destruct = l2cap_sock_destruct;
688 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
690 sock_reset_flag(sk, SOCK_ZAPPED);
692 sk->sk_protocol = proto;
693 sk->sk_state = BT_OPEN;
695 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
697 bt_sock_link(&l2cap_sk_list, sk);
698 return sk;
701 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
703 struct sock *sk;
705 BT_DBG("sock %p", sock);
707 sock->state = SS_UNCONNECTED;
709 if (sock->type != SOCK_SEQPACKET &&
710 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
711 return -ESOCKTNOSUPPORT;
713 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
714 return -EPERM;
716 sock->ops = &l2cap_sock_ops;
718 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
719 if (!sk)
720 return -ENOMEM;
722 l2cap_sock_init(sk, NULL);
723 return 0;
726 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
728 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
729 struct sock *sk = sock->sk;
730 int err = 0;
732 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
734 if (!addr || addr->sa_family != AF_BLUETOOTH)
735 return -EINVAL;
737 lock_sock(sk);
739 if (sk->sk_state != BT_OPEN) {
740 err = -EBADFD;
741 goto done;
744 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
745 !capable(CAP_NET_BIND_SERVICE)) {
746 err = -EACCES;
747 goto done;
750 write_lock_bh(&l2cap_sk_list.lock);
752 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
753 err = -EADDRINUSE;
754 } else {
755 /* Save source address */
756 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
757 l2cap_pi(sk)->psm = la->l2_psm;
758 l2cap_pi(sk)->sport = la->l2_psm;
759 sk->sk_state = BT_BOUND;
762 write_unlock_bh(&l2cap_sk_list.lock);
764 done:
765 release_sock(sk);
766 return err;
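/* Resolve the HCI route to the destination, create or reuse the ACL link
 * with the authentication type implied by the socket's link mode, attach
 * the channel and, if the link is already up, start channel setup. */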
769 static int l2cap_do_connect(struct sock *sk)
771 bdaddr_t *src = &bt_sk(sk)->src;
772 bdaddr_t *dst = &bt_sk(sk)->dst;
773 struct l2cap_conn *conn;
774 struct hci_conn *hcon;
775 struct hci_dev *hdev;
776 __u8 auth_type;
777 int err = 0;
779 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
781 if (!(hdev = hci_get_route(dst, src)))
782 return -EHOSTUNREACH;
784 hci_dev_lock_bh(hdev);
786 err = -ENOMEM;
788 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
789 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
790 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
791 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
792 auth_type = HCI_AT_NO_BONDING_MITM;
793 else
794 auth_type = HCI_AT_GENERAL_BONDING_MITM;
795 } else {
796 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
797 auth_type = HCI_AT_NO_BONDING;
798 else
799 auth_type = HCI_AT_GENERAL_BONDING;
802 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
803 if (!hcon)
804 goto done;
806 conn = l2cap_conn_add(hcon, 0);
807 if (!conn) {
808 hci_conn_put(hcon);
809 goto done;
812 err = 0;
814 /* Update source addr of the socket */
815 bacpy(src, conn->src);
817 l2cap_chan_add(conn, sk, NULL);
819 sk->sk_state = BT_CONNECT;
820 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
822 if (hcon->state == BT_CONNECTED) {
823 if (sk->sk_type != SOCK_SEQPACKET) {
824 l2cap_sock_clear_timer(sk);
825 sk->sk_state = BT_CONNECTED;
826 } else
827 l2cap_do_start(sk);
830 done:
831 hci_dev_unlock_bh(hdev);
832 hci_dev_put(hdev);
833 return err;
836 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
838 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
839 struct sock *sk = sock->sk;
840 int err = 0;
842 lock_sock(sk);
844 BT_DBG("sk %p", sk);
846 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
847 err = -EINVAL;
848 goto done;
851 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
852 err = -EINVAL;
853 goto done;
856 switch (sk->sk_state) {
857 case BT_CONNECT:
858 case BT_CONNECT2:
859 case BT_CONFIG:
860 /* Already connecting */
861 goto wait;
863 case BT_CONNECTED:
864 /* Already connected */
865 goto done;
867 case BT_OPEN:
868 case BT_BOUND:
869 /* Can connect */
870 break;
872 default:
873 err = -EBADFD;
874 goto done;
877 /* Set destination address and psm */
878 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
879 l2cap_pi(sk)->psm = la->l2_psm;
881 if ((err = l2cap_do_connect(sk)))
882 goto done;
884 wait:
885 err = bt_sock_wait_state(sk, BT_CONNECTED,
886 sock_sndtimeo(sk, flags & O_NONBLOCK));
887 done:
888 release_sock(sk);
889 return err;
892 static int l2cap_sock_listen(struct socket *sock, int backlog)
894 struct sock *sk = sock->sk;
895 int err = 0;
897 BT_DBG("sk %p backlog %d", sk, backlog);
899 lock_sock(sk);
901 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
902 err = -EBADFD;
903 goto done;
906 if (!l2cap_pi(sk)->psm) {
907 bdaddr_t *src = &bt_sk(sk)->src;
908 u16 psm;
910 err = -EINVAL;
912 write_lock_bh(&l2cap_sk_list.lock);
914 for (psm = 0x1001; psm < 0x1100; psm += 2)
915 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
916 l2cap_pi(sk)->psm = htobs(psm);
917 l2cap_pi(sk)->sport = htobs(psm);
918 err = 0;
919 break;
922 write_unlock_bh(&l2cap_sk_list.lock);
924 if (err < 0)
925 goto done;
928 sk->sk_max_ack_backlog = backlog;
929 sk->sk_ack_backlog = 0;
930 sk->sk_state = BT_LISTEN;
932 done:
933 release_sock(sk);
934 return err;
937 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
939 DECLARE_WAITQUEUE(wait, current);
940 struct sock *sk = sock->sk, *nsk;
941 long timeo;
942 int err = 0;
944 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
946 if (sk->sk_state != BT_LISTEN) {
947 err = -EBADFD;
948 goto done;
951 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
953 BT_DBG("sk %p timeo %ld", sk, timeo);
955 /* Wait for an incoming connection. (wake-one). */
956 add_wait_queue_exclusive(sk->sk_sleep, &wait);
957 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
958 set_current_state(TASK_INTERRUPTIBLE);
959 if (!timeo) {
960 err = -EAGAIN;
961 break;
964 release_sock(sk);
965 timeo = schedule_timeout(timeo);
966 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
968 if (sk->sk_state != BT_LISTEN) {
969 err = -EBADFD;
970 break;
973 if (signal_pending(current)) {
974 err = sock_intr_errno(timeo);
975 break;
978 set_current_state(TASK_RUNNING);
979 remove_wait_queue(sk->sk_sleep, &wait);
981 if (err)
982 goto done;
984 newsock->state = SS_CONNECTED;
986 BT_DBG("new socket %p", nsk);
988 done:
989 release_sock(sk);
990 return err;
993 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
995 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
996 struct sock *sk = sock->sk;
998 BT_DBG("sock %p, sk %p", sock, sk);
1000 addr->sa_family = AF_BLUETOOTH;
1001 *len = sizeof(struct sockaddr_l2);
1003 if (peer)
1004 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1005 else
1006 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1008 la->l2_psm = l2cap_pi(sk)->psm;
1009 return 0;
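/* Build an outgoing frame: the first skb carries the L2CAP header (plus the
 * PSM for connectionless sockets), the remaining payload is chained as
 * continuation fragments no larger than the ACL MTU, and the result is
 * handed to hci_send_acl(). Returns bytes queued or a negative error. */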
1012 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1014 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1015 struct sk_buff *skb, **frag;
1016 int err, hlen, count, sent = 0;
1017 struct l2cap_hdr *lh;
1019 BT_DBG("sk %p len %d", sk, len);
1021 /* First fragment (with L2CAP header) */
1022 if (sk->sk_type == SOCK_DGRAM)
1023 hlen = L2CAP_HDR_SIZE + 2;
1024 else
1025 hlen = L2CAP_HDR_SIZE;
1027 count = min_t(unsigned int, (conn->mtu - hlen), len);
1029 skb = bt_skb_send_alloc(sk, hlen + count,
1030 msg->msg_flags & MSG_DONTWAIT, &err);
1031 if (!skb)
1032 return err;
1034 /* Create L2CAP header */
1035 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1036 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1037 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1039 if (sk->sk_type == SOCK_DGRAM)
1040 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1042 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1043 err = -EFAULT;
1044 goto fail;
1047 sent += count;
1048 len -= count;
1050 /* Continuation fragments (no L2CAP header) */
1051 frag = &skb_shinfo(skb)->frag_list;
1052 while (len) {
1053 count = min_t(unsigned int, conn->mtu, len);
1055 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1056 if (!*frag)
1057 goto fail;
1059 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1060 err = -EFAULT;
1061 goto fail;
1064 sent += count;
1065 len -= count;
1067 frag = &(*frag)->next;
1070 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1071 goto fail;
1073 return sent;
1075 fail:
1076 kfree_skb(skb);
1077 return err;
1080 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1082 struct sock *sk = sock->sk;
1083 int err = 0;
1085 BT_DBG("sock %p, sk %p", sock, sk);
1087 err = sock_error(sk);
1088 if (err)
1089 return err;
1091 if (msg->msg_flags & MSG_OOB)
1092 return -EOPNOTSUPP;
1094 /* Check outgoing MTU */
1095 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1096 return -EINVAL;
1098 lock_sock(sk);
1100 if (sk->sk_state == BT_CONNECTED)
1101 err = l2cap_do_send(sk, msg, len);
1102 else
1103 err = -ENOTCONN;
1105 release_sock(sk);
1106 return err;
1109 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1111 struct sock *sk = sock->sk;
1112 struct l2cap_options opts;
1113 int err = 0, len;
1114 u32 opt;
1116 BT_DBG("sk %p", sk);
1118 lock_sock(sk);
1120 switch (optname) {
1121 case L2CAP_OPTIONS:
1122 opts.imtu = l2cap_pi(sk)->imtu;
1123 opts.omtu = l2cap_pi(sk)->omtu;
1124 opts.flush_to = l2cap_pi(sk)->flush_to;
1125 opts.mode = L2CAP_MODE_BASIC;
1127 len = min_t(unsigned int, sizeof(opts), optlen);
1128 if (copy_from_user((char *) &opts, optval, len)) {
1129 err = -EFAULT;
1130 break;
1133 l2cap_pi(sk)->imtu = opts.imtu;
1134 l2cap_pi(sk)->omtu = opts.omtu;
1135 break;
1137 case L2CAP_LM:
1138 if (get_user(opt, (u32 __user *) optval)) {
1139 err = -EFAULT;
1140 break;
1143 l2cap_pi(sk)->link_mode = opt;
1144 break;
1146 default:
1147 err = -ENOPROTOOPT;
1148 break;
1151 release_sock(sk);
1152 return err;
1155 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1157 struct sock *sk = sock->sk;
1158 struct l2cap_options opts;
1159 struct l2cap_conninfo cinfo;
1160 int len, err = 0;
1162 BT_DBG("sk %p", sk);
1164 if (get_user(len, optlen))
1165 return -EFAULT;
1167 lock_sock(sk);
1169 switch (optname) {
1170 case L2CAP_OPTIONS:
1171 opts.imtu = l2cap_pi(sk)->imtu;
1172 opts.omtu = l2cap_pi(sk)->omtu;
1173 opts.flush_to = l2cap_pi(sk)->flush_to;
1174 opts.mode = L2CAP_MODE_BASIC;
1176 len = min_t(unsigned int, len, sizeof(opts));
1177 if (copy_to_user(optval, (char *) &opts, len))
1178 err = -EFAULT;
1180 break;
1182 case L2CAP_LM:
1183 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1184 err = -EFAULT;
1185 break;
1187 case L2CAP_CONNINFO:
1188 if (sk->sk_state != BT_CONNECTED) {
1189 err = -ENOTCONN;
1190 break;
1193 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1194 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1196 len = min_t(unsigned int, len, sizeof(cinfo));
1197 if (copy_to_user(optval, (char *) &cinfo, len))
1198 err = -EFAULT;
1200 break;
1202 default:
1203 err = -ENOPROTOOPT;
1204 break;
1207 release_sock(sk);
1208 return err;
1211 static int l2cap_sock_shutdown(struct socket *sock, int how)
1213 struct sock *sk = sock->sk;
1214 int err = 0;
1216 BT_DBG("sock %p, sk %p", sock, sk);
1218 if (!sk)
1219 return 0;
1221 lock_sock(sk);
1222 if (!sk->sk_shutdown) {
1223 sk->sk_shutdown = SHUTDOWN_MASK;
1224 l2cap_sock_clear_timer(sk);
1225 __l2cap_sock_close(sk, 0);
1227 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1228 err = bt_sock_wait_state(sk, BT_CLOSED,
1229 sk->sk_lingertime);
1231 release_sock(sk);
1232 return err;
1235 static int l2cap_sock_release(struct socket *sock)
1237 struct sock *sk = sock->sk;
1238 int err;
1240 BT_DBG("sock %p, sk %p", sock, sk);
1242 if (!sk)
1243 return 0;
1245 err = l2cap_sock_shutdown(sock, 2);
1247 sock_orphan(sk);
1248 l2cap_sock_kill(sk);
1249 return err;
1252 static void l2cap_chan_ready(struct sock *sk)
1254 struct sock *parent = bt_sk(sk)->parent;
1256 BT_DBG("sk %p, parent %p", sk, parent);
1258 l2cap_pi(sk)->conf_state = 0;
1259 l2cap_sock_clear_timer(sk);
1261 if (!parent) {
1262 /* Outgoing channel.
1263 * Wake up socket sleeping on connect. */
1265 sk->sk_state = BT_CONNECTED;
1266 sk->sk_state_change(sk);
1267 } else {
1268 /* Incoming channel.
1269 * Wake up socket sleeping on accept. */
1271 parent->sk_data_ready(parent, 0);
1274 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1275 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1276 hci_conn_change_link_key(conn->hcon);
1280 /* Copy frame to all raw sockets on that connection */
1281 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1283 struct l2cap_chan_list *l = &conn->chan_list;
1284 struct sk_buff *nskb;
1285 struct sock *sk;
1287 BT_DBG("conn %p", conn);
1289 read_lock(&l->lock);
1290 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1291 if (sk->sk_type != SOCK_RAW)
1292 continue;
1294 /* Don't send frame to the socket it came from */
1295 if (skb->sk == sk)
1296 continue;
1298 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1299 continue;
1301 if (sock_queue_rcv_skb(sk, nskb))
1302 kfree_skb(nskb);
1304 read_unlock(&l->lock);
1307 /* ---- L2CAP signalling commands ---- */
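/* Build a signalling PDU on CID 0x0001: L2CAP header, command header and
 * payload, with the payload fragmented to the ACL MTU when necessary. */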
1308 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1309 u8 code, u8 ident, u16 dlen, void *data)
1311 struct sk_buff *skb, **frag;
1312 struct l2cap_cmd_hdr *cmd;
1313 struct l2cap_hdr *lh;
1314 int len, count;
1316 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1318 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1319 count = min_t(unsigned int, conn->mtu, len);
1321 skb = bt_skb_alloc(count, GFP_ATOMIC);
1322 if (!skb)
1323 return NULL;
1325 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1326 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1327 lh->cid = cpu_to_le16(0x0001);
1329 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1330 cmd->code = code;
1331 cmd->ident = ident;
1332 cmd->len = cpu_to_le16(dlen);
1334 if (dlen) {
1335 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1336 memcpy(skb_put(skb, count), data, count);
1337 data += count;
1340 len -= skb->len;
1342 /* Continuation fragments (no L2CAP header) */
1343 frag = &skb_shinfo(skb)->frag_list;
1344 while (len) {
1345 count = min_t(unsigned int, conn->mtu, len);
1347 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1348 if (!*frag)
1349 goto fail;
1351 memcpy(skb_put(*frag, count), data, count);
1353 len -= count;
1354 data += count;
1356 frag = &(*frag)->next;
1359 return skb;
1361 fail:
1362 kfree_skb(skb);
1363 return NULL;
1366 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1368 struct l2cap_conf_opt *opt = *ptr;
1369 int len;
1371 len = L2CAP_CONF_OPT_SIZE + opt->len;
1372 *ptr += len;
1374 *type = opt->type;
1375 *olen = opt->len;
1377 switch (opt->len) {
1378 case 1:
1379 *val = *((u8 *) opt->val);
1380 break;
1382 case 2:
1383 *val = __le16_to_cpu(*((__le16 *) opt->val));
1384 break;
1386 case 4:
1387 *val = __le32_to_cpu(*((__le32 *) opt->val));
1388 break;
1390 default:
1391 *val = (unsigned long) opt->val;
1392 break;
1395 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1396 return len;
1399 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1401 struct l2cap_conf_opt *opt = *ptr;
1403 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1405 opt->type = type;
1406 opt->len = len;
1408 switch (len) {
1409 case 1:
1410 *((u8 *) opt->val) = val;
1411 break;
1413 case 2:
1414 *((__le16 *) opt->val) = cpu_to_le16(val);
1415 break;
1417 case 4:
1418 *((__le32 *) opt->val) = cpu_to_le32(val);
1419 break;
1421 default:
1422 memcpy(opt->val, (void *) val, len);
1423 break;
1426 *ptr += L2CAP_CONF_OPT_SIZE + len;
1429 static int l2cap_build_conf_req(struct sock *sk, void *data)
1431 struct l2cap_pinfo *pi = l2cap_pi(sk);
1432 struct l2cap_conf_req *req = data;
1433 void *ptr = req->data;
1435 BT_DBG("sk %p", sk);
1437 if (pi->imtu != L2CAP_DEFAULT_MTU)
1438 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1440 /* FIXME: Need actual value of the flush timeout */
1441 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1442 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1444 req->dcid = cpu_to_le16(pi->dcid);
1445 req->flags = cpu_to_le16(0);
1447 return ptr - data;
1450 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1452 struct l2cap_pinfo *pi = l2cap_pi(sk);
1453 struct l2cap_conf_rsp *rsp = data;
1454 void *ptr = rsp->data;
1455 void *req = pi->conf_req;
1456 int len = pi->conf_len;
1457 int type, hint, olen;
1458 unsigned long val;
1459 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1460 u16 mtu = L2CAP_DEFAULT_MTU;
1461 u16 result = L2CAP_CONF_SUCCESS;
1463 BT_DBG("sk %p", sk);
1465 while (len >= L2CAP_CONF_OPT_SIZE) {
1466 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1468 hint = type & 0x80;
1469 type &= 0x7f;
1471 switch (type) {
1472 case L2CAP_CONF_MTU:
1473 mtu = val;
1474 break;
1476 case L2CAP_CONF_FLUSH_TO:
1477 pi->flush_to = val;
1478 break;
1480 case L2CAP_CONF_QOS:
1481 break;
1483 case L2CAP_CONF_RFC:
1484 if (olen == sizeof(rfc))
1485 memcpy(&rfc, (void *) val, olen);
1486 break;
1488 default:
1489 if (hint)
1490 break;
1492 result = L2CAP_CONF_UNKNOWN;
1493 *((u8 *) ptr++) = type;
1494 break;
1498 if (result == L2CAP_CONF_SUCCESS) {
1499 /* Configure output options and let the other side know
1500 * which ones we don't like. */
1502 if (rfc.mode == L2CAP_MODE_BASIC) {
1503 if (mtu < pi->omtu)
1504 result = L2CAP_CONF_UNACCEPT;
1505 else {
1506 pi->omtu = mtu;
1507 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1511 } else {
1512 result = L2CAP_CONF_UNACCEPT;
1514 memset(&rfc, 0, sizeof(rfc));
1515 rfc.mode = L2CAP_MODE_BASIC;
1517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1518 sizeof(rfc), (unsigned long) &rfc);
1522 rsp->scid = cpu_to_le16(pi->dcid);
1523 rsp->result = cpu_to_le16(result);
1524 rsp->flags = cpu_to_le16(0x0000);
1526 return ptr - data;
1529 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1531 struct l2cap_conf_rsp *rsp = data;
1532 void *ptr = rsp->data;
1534 BT_DBG("sk %p", sk);
1536 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1537 rsp->result = cpu_to_le16(result);
1538 rsp->flags = cpu_to_le16(flags);
1540 return ptr - data;
1543 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1545 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1547 if (rej->reason != 0x0000)
1548 return 0;
1550 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1551 cmd->ident == conn->info_ident) {
1552 conn->info_ident = 0;
1553 del_timer(&conn->info_timer);
1554 l2cap_conn_start(conn);
1557 return 0;
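/* Handle an incoming Connection Request: look up a listening socket for
 * the PSM, check link security and the accept backlog, create and link a
 * child socket, then answer with success, pending or an error result. */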
1560 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1562 struct l2cap_chan_list *list = &conn->chan_list;
1563 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1564 struct l2cap_conn_rsp rsp;
1565 struct sock *sk, *parent;
1566 int result, status = L2CAP_CS_NO_INFO;
1568 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1569 __le16 psm = req->psm;
1571 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1573 /* Check if we have socket listening on psm */
1574 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1575 if (!parent) {
1576 result = L2CAP_CR_BAD_PSM;
1577 goto sendresp;
1580 /* Check if the ACL is secure enough (if not SDP) */
1581 if (psm != cpu_to_le16(0x0001) &&
1582 !hci_conn_check_link_mode(conn->hcon)) {
1583 result = L2CAP_CR_SEC_BLOCK;
1584 goto response;
1587 result = L2CAP_CR_NO_MEM;
1589 /* Check for backlog size */
1590 if (sk_acceptq_is_full(parent)) {
1591 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1592 goto response;
1595 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1596 if (!sk)
1597 goto response;
1599 write_lock_bh(&list->lock);
1601 /* Check if we already have channel with that dcid */
1602 if (__l2cap_get_chan_by_dcid(list, scid)) {
1603 write_unlock_bh(&list->lock);
1604 sock_set_flag(sk, SOCK_ZAPPED);
1605 l2cap_sock_kill(sk);
1606 goto response;
1609 hci_conn_hold(conn->hcon);
1611 l2cap_sock_init(sk, parent);
1612 bacpy(&bt_sk(sk)->src, conn->src);
1613 bacpy(&bt_sk(sk)->dst, conn->dst);
1614 l2cap_pi(sk)->psm = psm;
1615 l2cap_pi(sk)->dcid = scid;
1617 __l2cap_chan_add(conn, sk, parent);
1618 dcid = l2cap_pi(sk)->scid;
1620 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1622 l2cap_pi(sk)->ident = cmd->ident;
1624 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1625 if (l2cap_check_link_mode(sk)) {
1626 sk->sk_state = BT_CONFIG;
1627 result = L2CAP_CR_SUCCESS;
1628 status = L2CAP_CS_NO_INFO;
1629 } else {
1630 sk->sk_state = BT_CONNECT2;
1631 result = L2CAP_CR_PEND;
1632 status = L2CAP_CS_AUTHEN_PEND;
1634 } else {
1635 sk->sk_state = BT_CONNECT2;
1636 result = L2CAP_CR_PEND;
1637 status = L2CAP_CS_NO_INFO;
1640 write_unlock_bh(&list->lock);
1642 response:
1643 bh_unlock_sock(parent);
1645 sendresp:
1646 rsp.scid = cpu_to_le16(scid);
1647 rsp.dcid = cpu_to_le16(dcid);
1648 rsp.result = cpu_to_le16(result);
1649 rsp.status = cpu_to_le16(status);
1650 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1652 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1653 struct l2cap_info_req info;
1654 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1657 conn->info_ident = l2cap_get_ident(conn);
1659 mod_timer(&conn->info_timer, jiffies +
1660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1662 l2cap_send_cmd(conn, conn->info_ident,
1663 L2CAP_INFO_REQ, sizeof(info), &info);
1666 return 0;
1669 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1671 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1672 u16 scid, dcid, result, status;
1673 struct sock *sk;
1674 u8 req[128];
1676 scid = __le16_to_cpu(rsp->scid);
1677 dcid = __le16_to_cpu(rsp->dcid);
1678 result = __le16_to_cpu(rsp->result);
1679 status = __le16_to_cpu(rsp->status);
1681 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1683 if (scid) {
1684 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1685 return 0;
1686 } else {
1687 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1688 return 0;
1691 switch (result) {
1692 case L2CAP_CR_SUCCESS:
1693 sk->sk_state = BT_CONFIG;
1694 l2cap_pi(sk)->ident = 0;
1695 l2cap_pi(sk)->dcid = dcid;
1696 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1698 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1699 l2cap_build_conf_req(sk, req), req);
1700 break;
1702 case L2CAP_CR_PEND:
1703 break;
1705 default:
1706 l2cap_chan_del(sk, ECONNREFUSED);
1707 break;
1710 bh_unlock_sock(sk);
1711 return 0;
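/* Handle a Configuration Request: accumulate option data until the
 * continuation flag is clear, then parse the options, send our response
 * and, once both directions are configured, mark the channel ready. */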
1714 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1716 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1717 u16 dcid, flags;
1718 u8 rsp[64];
1719 struct sock *sk;
1720 int len;
1722 dcid = __le16_to_cpu(req->dcid);
1723 flags = __le16_to_cpu(req->flags);
1725 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1727 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1728 return -ENOENT;
1730 if (sk->sk_state == BT_DISCONN)
1731 goto unlock;
1733 /* Reject if config buffer is too small. */
1734 len = cmd_len - sizeof(*req);
1735 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1736 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1737 l2cap_build_conf_rsp(sk, rsp,
1738 L2CAP_CONF_REJECT, flags), rsp);
1739 goto unlock;
1742 /* Store config. */
1743 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1744 l2cap_pi(sk)->conf_len += len;
1746 if (flags & 0x0001) {
1747 /* Incomplete config. Send empty response. */
1748 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1749 l2cap_build_conf_rsp(sk, rsp,
1750 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1751 goto unlock;
1754 /* Complete config. */
1755 len = l2cap_parse_conf_req(sk, rsp);
1756 if (len < 0)
1757 goto unlock;
1759 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1761 /* Reset config buffer. */
1762 l2cap_pi(sk)->conf_len = 0;
1764 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1765 goto unlock;
1767 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1768 sk->sk_state = BT_CONNECTED;
1769 l2cap_chan_ready(sk);
1770 goto unlock;
1773 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1774 u8 buf[64];
1775 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1776 l2cap_build_conf_req(sk, buf), buf);
1779 unlock:
1780 bh_unlock_sock(sk);
1781 return 0;
1784 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1786 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1787 u16 scid, flags, result;
1788 struct sock *sk;
1790 scid = __le16_to_cpu(rsp->scid);
1791 flags = __le16_to_cpu(rsp->flags);
1792 result = __le16_to_cpu(rsp->result);
1794 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1796 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1797 return 0;
1799 switch (result) {
1800 case L2CAP_CONF_SUCCESS:
1801 break;
1803 case L2CAP_CONF_UNACCEPT:
1804 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1805 char req[128];
1806 /* It does not make sense to adjust L2CAP parameters
1807 * that are currently defined in the spec. We simply
1808 * resend config request that we sent earlier. It is
1809 * stupid, but it helps qualification testing which
1810 * expects at least some response from us. */
1811 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1812 l2cap_build_conf_req(sk, req), req);
1813 goto done;
1816 default:
1817 sk->sk_state = BT_DISCONN;
1818 sk->sk_err = ECONNRESET;
1819 l2cap_sock_set_timer(sk, HZ * 5);
1821 struct l2cap_disconn_req req;
1822 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1823 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1824 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1825 L2CAP_DISCONN_REQ, sizeof(req), &req);
1827 goto done;
1830 if (flags & 0x01)
1831 goto done;
1833 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1835 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1836 sk->sk_state = BT_CONNECTED;
1837 l2cap_chan_ready(sk);
1840 done:
1841 bh_unlock_sock(sk);
1842 return 0;
1845 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1847 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1848 struct l2cap_disconn_rsp rsp;
1849 u16 dcid, scid;
1850 struct sock *sk;
1852 scid = __le16_to_cpu(req->scid);
1853 dcid = __le16_to_cpu(req->dcid);
1855 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1857 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1858 return 0;
1860 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1861 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1862 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1864 sk->sk_shutdown = SHUTDOWN_MASK;
1866 l2cap_chan_del(sk, ECONNRESET);
1867 bh_unlock_sock(sk);
1869 l2cap_sock_kill(sk);
1870 return 0;
1873 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1875 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1876 u16 dcid, scid;
1877 struct sock *sk;
1879 scid = __le16_to_cpu(rsp->scid);
1880 dcid = __le16_to_cpu(rsp->dcid);
1882 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1884 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1885 return 0;
1887 l2cap_chan_del(sk, 0);
1888 bh_unlock_sock(sk);
1890 l2cap_sock_kill(sk);
1891 return 0;
1894 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1896 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1897 u16 type;
1899 type = __le16_to_cpu(req->type);
1901 BT_DBG("type 0x%4.4x", type);
1903 if (type == L2CAP_IT_FEAT_MASK) {
1904 u8 buf[8];
1905 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1906 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1907 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1908 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1909 l2cap_send_cmd(conn, cmd->ident,
1910 L2CAP_INFO_RSP, sizeof(buf), buf);
1911 } else {
1912 struct l2cap_info_rsp rsp;
1913 rsp.type = cpu_to_le16(type);
1914 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1915 l2cap_send_cmd(conn, cmd->ident,
1916 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1919 return 0;
1922 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1924 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1925 u16 type, result;
1927 type = __le16_to_cpu(rsp->type);
1928 result = __le16_to_cpu(rsp->result);
1930 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1932 conn->info_ident = 0;
1934 del_timer(&conn->info_timer);
1936 if (type == L2CAP_IT_FEAT_MASK)
1937 conn->feat_mask = get_unaligned_le32(rsp->data);
1939 l2cap_conn_start(conn);
1941 return 0;
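/* Process a frame received on the signalling channel: mirror it to raw
 * sockets, then dispatch each embedded command to its handler, sending a
 * Command Reject when a handler fails or the command is unknown. */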
1944 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1946 u8 *data = skb->data;
1947 int len = skb->len;
1948 struct l2cap_cmd_hdr cmd;
1949 int err = 0;
1951 l2cap_raw_recv(conn, skb);
1953 while (len >= L2CAP_CMD_HDR_SIZE) {
1954 u16 cmd_len;
1955 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1956 data += L2CAP_CMD_HDR_SIZE;
1957 len -= L2CAP_CMD_HDR_SIZE;
1959 cmd_len = le16_to_cpu(cmd.len);
1961 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1963 if (cmd_len > len || !cmd.ident) {
1964 BT_DBG("corrupted command");
1965 break;
1968 switch (cmd.code) {
1969 case L2CAP_COMMAND_REJ:
1970 l2cap_command_rej(conn, &cmd, data);
1971 break;
1973 case L2CAP_CONN_REQ:
1974 err = l2cap_connect_req(conn, &cmd, data);
1975 break;
1977 case L2CAP_CONN_RSP:
1978 err = l2cap_connect_rsp(conn, &cmd, data);
1979 break;
1981 case L2CAP_CONF_REQ:
1982 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1983 break;
1985 case L2CAP_CONF_RSP:
1986 err = l2cap_config_rsp(conn, &cmd, data);
1987 break;
1989 case L2CAP_DISCONN_REQ:
1990 err = l2cap_disconnect_req(conn, &cmd, data);
1991 break;
1993 case L2CAP_DISCONN_RSP:
1994 err = l2cap_disconnect_rsp(conn, &cmd, data);
1995 break;
1997 case L2CAP_ECHO_REQ:
1998 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1999 break;
2001 case L2CAP_ECHO_RSP:
2002 break;
2004 case L2CAP_INFO_REQ:
2005 err = l2cap_information_req(conn, &cmd, data);
2006 break;
2008 case L2CAP_INFO_RSP:
2009 err = l2cap_information_rsp(conn, &cmd, data);
2010 break;
2012 default:
2013 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2014 err = -EINVAL;
2015 break;
2018 if (err) {
2019 struct l2cap_cmd_rej rej;
2020 BT_DBG("error %d", err);
2022 /* FIXME: Map err to a valid reason */
2023 rej.reason = cpu_to_le16(0);
2024 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2027 data += cmd_len;
2028 len -= cmd_len;
2031 kfree_skb(skb);
2034 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2036 struct sock *sk;
2038 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2039 if (!sk) {
2040 BT_DBG("unknown cid 0x%4.4x", cid);
2041 goto drop;
2044 BT_DBG("sk %p, len %d", sk, skb->len);
2046 if (sk->sk_state != BT_CONNECTED)
2047 goto drop;
2049 if (l2cap_pi(sk)->imtu < skb->len)
2050 goto drop;
2052 /* If the socket recv buffer overflows we drop data here,
2053 * which is *bad* because L2CAP has to be reliable.
2054 * But we don't have any other choice: L2CAP doesn't
2055 * provide a flow control mechanism. */
2057 if (!sock_queue_rcv_skb(sk, skb))
2058 goto done;
2060 drop:
2061 kfree_skb(skb);
2063 done:
2064 if (sk)
2065 bh_unlock_sock(sk);
2067 return 0;
2070 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2072 struct sock *sk;
2074 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2075 if (!sk)
2076 goto drop;
2078 BT_DBG("sk %p, len %d", sk, skb->len);
2080 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2081 goto drop;
2083 if (l2cap_pi(sk)->imtu < skb->len)
2084 goto drop;
2086 if (!sock_queue_rcv_skb(sk, skb))
2087 goto done;
2089 drop:
2090 kfree_skb(skb);
2092 done:
2093 if (sk) bh_unlock_sock(sk);
2094 return 0;
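/* Demultiplex a complete L2CAP frame by CID: 0x0001 goes to the signalling
 * handler, 0x0002 to the connectionless (PSM) handler, everything else to
 * the matching data channel. */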
2097 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2099 struct l2cap_hdr *lh = (void *) skb->data;
2100 u16 cid, len;
2101 __le16 psm;
2103 skb_pull(skb, L2CAP_HDR_SIZE);
2104 cid = __le16_to_cpu(lh->cid);
2105 len = __le16_to_cpu(lh->len);
2107 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2109 switch (cid) {
2110 case 0x0001:
2111 l2cap_sig_channel(conn, skb);
2112 break;
2114 case 0x0002:
2115 psm = get_unaligned((__le16 *) skb->data);
2116 skb_pull(skb, 2);
2117 l2cap_conless_channel(conn, psm, skb);
2118 break;
2120 default:
2121 l2cap_data_channel(conn, cid, skb);
2122 break;
2126 /* ---- L2CAP interface with lower layer (HCI) ---- */
2128 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2130 int exact = 0, lm1 = 0, lm2 = 0;
2131 register struct sock *sk;
2132 struct hlist_node *node;
2134 if (type != ACL_LINK)
2135 return 0;
2137 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2139 /* Find listening sockets and check their link_mode */
2140 read_lock(&l2cap_sk_list.lock);
2141 sk_for_each(sk, node, &l2cap_sk_list.head) {
2142 if (sk->sk_state != BT_LISTEN)
2143 continue;
2145 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2146 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2147 exact++;
2148 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2149 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2151 read_unlock(&l2cap_sk_list.lock);
2153 return exact ? lm1 : lm2;
2156 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2158 struct l2cap_conn *conn;
2160 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2162 if (hcon->type != ACL_LINK)
2163 return 0;
2165 if (!status) {
2166 conn = l2cap_conn_add(hcon, status);
2167 if (conn)
2168 l2cap_conn_ready(conn);
2169 } else
2170 l2cap_conn_del(hcon, bt_err(status));
2172 return 0;
2175 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2177 BT_DBG("hcon %p reason %d", hcon, reason);
2179 if (hcon->type != ACL_LINK)
2180 return 0;
2182 l2cap_conn_del(hcon, bt_err(reason));
2184 return 0;
2187 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2189 struct l2cap_chan_list *l;
2190 struct l2cap_conn *conn = hcon->l2cap_data;
2191 struct sock *sk;
2193 if (!conn)
2194 return 0;
2196 l = &conn->chan_list;
2198 BT_DBG("conn %p", conn);
2200 read_lock(&l->lock);
2202 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2203 struct l2cap_pinfo *pi = l2cap_pi(sk);
2205 bh_lock_sock(sk);
2207 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2208 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2209 !status) {
2210 bh_unlock_sock(sk);
2211 continue;
2214 if (sk->sk_state == BT_CONNECT) {
2215 if (!status) {
2216 struct l2cap_conn_req req;
2217 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2218 req.psm = l2cap_pi(sk)->psm;
2220 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2222 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2223 L2CAP_CONN_REQ, sizeof(req), &req);
2224 } else {
2225 l2cap_sock_clear_timer(sk);
2226 l2cap_sock_set_timer(sk, HZ / 10);
2228 } else if (sk->sk_state == BT_CONNECT2) {
2229 struct l2cap_conn_rsp rsp;
2230 __u16 result;
2232 if (!status) {
2233 sk->sk_state = BT_CONFIG;
2234 result = L2CAP_CR_SUCCESS;
2235 } else {
2236 sk->sk_state = BT_DISCONN;
2237 l2cap_sock_set_timer(sk, HZ / 10);
2238 result = L2CAP_CR_SEC_BLOCK;
2241 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2242 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2243 rsp.result = cpu_to_le16(result);
2244 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2245 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2246 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2249 bh_unlock_sock(sk);
2252 read_unlock(&l->lock);
2254 return 0;
2257 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2259 struct l2cap_chan_list *l;
2260 struct l2cap_conn *conn = hcon->l2cap_data;
2261 struct sock *sk;
2263 if (!conn)
2264 return 0;
2266 l = &conn->chan_list;
2268 BT_DBG("conn %p", conn);
2270 read_lock(&l->lock);
2272 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2273 struct l2cap_pinfo *pi = l2cap_pi(sk);
2275 bh_lock_sock(sk);
2277 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2278 (sk->sk_state == BT_CONNECTED ||
2279 sk->sk_state == BT_CONFIG) &&
2280 !status && encrypt == 0x00) {
2281 __l2cap_sock_close(sk, ECONNREFUSED);
2282 bh_unlock_sock(sk);
2283 continue;
2286 if (sk->sk_state == BT_CONNECT) {
2287 if (!status) {
2288 struct l2cap_conn_req req;
2289 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2290 req.psm = l2cap_pi(sk)->psm;
2292 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2294 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2295 L2CAP_CONN_REQ, sizeof(req), &req);
2296 } else {
2297 l2cap_sock_clear_timer(sk);
2298 l2cap_sock_set_timer(sk, HZ / 10);
2300 } else if (sk->sk_state == BT_CONNECT2) {
2301 struct l2cap_conn_rsp rsp;
2302 __u16 result;
2304 if (!status) {
2305 sk->sk_state = BT_CONFIG;
2306 result = L2CAP_CR_SUCCESS;
2307 } else {
2308 sk->sk_state = BT_DISCONN;
2309 l2cap_sock_set_timer(sk, HZ / 10);
2310 result = L2CAP_CR_SEC_BLOCK;
2313 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2314 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2315 rsp.result = cpu_to_le16(result);
2316 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2317 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2318 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2321 bh_unlock_sock(sk);
2324 read_unlock(&l->lock);
2326 return 0;
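/* Reassemble incoming ACL data: a start fragment allocates conn->rx_skb
 * sized from the L2CAP header, continuation fragments are appended until
 * the frame is complete and passed on to l2cap_recv_frame(). Complete
 * single-fragment frames are delivered directly. */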
2329 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2331 struct l2cap_conn *conn = hcon->l2cap_data;
2333 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2334 goto drop;
2336 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2338 if (flags & ACL_START) {
2339 struct l2cap_hdr *hdr;
2340 int len;
2342 if (conn->rx_len) {
2343 BT_ERR("Unexpected start frame (len %d)", skb->len);
2344 kfree_skb(conn->rx_skb);
2345 conn->rx_skb = NULL;
2346 conn->rx_len = 0;
2347 l2cap_conn_unreliable(conn, ECOMM);
2350 if (skb->len < 2) {
2351 BT_ERR("Frame is too short (len %d)", skb->len);
2352 l2cap_conn_unreliable(conn, ECOMM);
2353 goto drop;
2356 hdr = (struct l2cap_hdr *) skb->data;
2357 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2359 if (len == skb->len) {
2360 /* Complete frame received */
2361 l2cap_recv_frame(conn, skb);
2362 return 0;
2365 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2367 if (skb->len > len) {
2368 BT_ERR("Frame is too long (len %d, expected len %d)",
2369 skb->len, len);
2370 l2cap_conn_unreliable(conn, ECOMM);
2371 goto drop;
2374 /* Allocate skb for the complete frame (with header) */
2375 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2376 goto drop;
2378 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2379 skb->len);
2380 conn->rx_len = len - skb->len;
2381 } else {
2382 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2384 if (!conn->rx_len) {
2385 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2386 l2cap_conn_unreliable(conn, ECOMM);
2387 goto drop;
2390 if (skb->len > conn->rx_len) {
2391 BT_ERR("Fragment is too long (len %d, expected %d)",
2392 skb->len, conn->rx_len);
2393 kfree_skb(conn->rx_skb);
2394 conn->rx_skb = NULL;
2395 conn->rx_len = 0;
2396 l2cap_conn_unreliable(conn, ECOMM);
2397 goto drop;
2400 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2401 skb->len);
2402 conn->rx_len -= skb->len;
2404 if (!conn->rx_len) {
2405 /* Complete frame received */
2406 l2cap_recv_frame(conn, conn->rx_skb);
2407 conn->rx_skb = NULL;
2411 drop:
2412 kfree_skb(skb);
2413 return 0;
2416 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2418 struct sock *sk;
2419 struct hlist_node *node;
2420 char *str = buf;
2422 read_lock_bh(&l2cap_sk_list.lock);
2424 sk_for_each(sk, node, &l2cap_sk_list.head) {
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2427 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2428 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2429 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2430 pi->imtu, pi->omtu, pi->link_mode);
2433 read_unlock_bh(&l2cap_sk_list.lock);
2435 return (str - buf);
2438 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2440 static const struct proto_ops l2cap_sock_ops = {
2441 .family = PF_BLUETOOTH,
2442 .owner = THIS_MODULE,
2443 .release = l2cap_sock_release,
2444 .bind = l2cap_sock_bind,
2445 .connect = l2cap_sock_connect,
2446 .listen = l2cap_sock_listen,
2447 .accept = l2cap_sock_accept,
2448 .getname = l2cap_sock_getname,
2449 .sendmsg = l2cap_sock_sendmsg,
2450 .recvmsg = bt_sock_recvmsg,
2451 .poll = bt_sock_poll,
2452 .ioctl = bt_sock_ioctl,
2453 .mmap = sock_no_mmap,
2454 .socketpair = sock_no_socketpair,
2455 .shutdown = l2cap_sock_shutdown,
2456 .setsockopt = l2cap_sock_setsockopt,
2457 .getsockopt = l2cap_sock_getsockopt
2460 static struct net_proto_family l2cap_sock_family_ops = {
2461 .family = PF_BLUETOOTH,
2462 .owner = THIS_MODULE,
2463 .create = l2cap_sock_create,
2466 static struct hci_proto l2cap_hci_proto = {
2467 .name = "L2CAP",
2468 .id = HCI_PROTO_L2CAP,
2469 .connect_ind = l2cap_connect_ind,
2470 .connect_cfm = l2cap_connect_cfm,
2471 .disconn_ind = l2cap_disconn_ind,
2472 .auth_cfm = l2cap_auth_cfm,
2473 .encrypt_cfm = l2cap_encrypt_cfm,
2474 .recv_acldata = l2cap_recv_acldata
2477 static int __init l2cap_init(void)
2479 int err;
2481 err = proto_register(&l2cap_proto, 0);
2482 if (err < 0)
2483 return err;
2485 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2486 if (err < 0) {
2487 BT_ERR("L2CAP socket registration failed");
2488 goto error;
2491 err = hci_register_proto(&l2cap_hci_proto);
2492 if (err < 0) {
2493 BT_ERR("L2CAP protocol registration failed");
2494 bt_sock_unregister(BTPROTO_L2CAP);
2495 goto error;
2498 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2499 BT_ERR("Failed to create L2CAP info file");
2501 BT_INFO("L2CAP ver %s", VERSION);
2502 BT_INFO("L2CAP socket layer initialized");
2504 return 0;
2506 error:
2507 proto_unregister(&l2cap_proto);
2508 return err;
2511 static void __exit l2cap_exit(void)
2513 class_remove_file(bt_class, &class_attr_l2cap);
2515 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2516 BT_ERR("L2CAP socket unregistration failed");
2518 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2519 BT_ERR("L2CAP protocol unregistration failed");
2521 proto_unregister(&l2cap_proto);
2524 void l2cap_load(void)
2526 /* Dummy function to trigger automatic L2CAP module loading by
2527 * other modules that use L2CAP sockets but don't use any other
2528 * symbols from it. */
2529 return;
2531 EXPORT_SYMBOL(l2cap_load);
2533 module_init(l2cap_init);
2534 module_exit(l2cap_exit);
2536 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2537 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2538 MODULE_VERSION(VERSION);
2539 MODULE_LICENSE("GPL");
2540 MODULE_ALIAS("bt-proto-0");