1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.11"
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
79 int reason;
81 BT_DBG("sock %p state %d", sk, sk->sk_state);
83 bh_lock_sock(sk);
85 if (sk->sk_state == BT_CONNECT &&
86 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
87 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
88 reason = ECONNREFUSED;
89 else
90 reason = ETIMEDOUT;
92 __l2cap_sock_close(sk, reason);
94 bh_unlock_sock(sk);
96 l2cap_sock_kill(sk);
97 sock_put(sk);
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
120 return s;
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
130 return s;
133 /* Find a channel with the given SCID.
134 * Returns a locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s) bh_lock_sock(s);
141 read_unlock(&l->lock);
142 return s;
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 struct sock *s;
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
150 break;
152 return s;
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 struct sock *s;
158 read_lock(&l->lock);
159 s = __l2cap_get_chan_by_ident(l, ident);
160 if (s) bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
165 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
167 u16 cid = 0x0040;
169 for (; cid < 0xffff; cid++) {
170 if(!__l2cap_get_chan_by_scid(l, cid))
171 return cid;
174 return 0;
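/*
 * Illustrative note (not part of the original source): the CID ranges
 * used here follow the L2CAP specification and the assignments made in
 * __l2cap_chan_add() below:
 *
 *	0x0000          reserved, never allocated
 *	0x0001          signalling channel (raw sockets)
 *	0x0002          connectionless data channel (SOCK_DGRAM)
 *	0x0003-0x003f   reserved
 *	0x0040-0xfffe   dynamically allocated, one per SOCK_SEQPACKET channel
 *
 * l2cap_alloc_cid() simply returns the first unused dynamic CID, or 0
 * if the whole range is exhausted.
 */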
177 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
179 sock_hold(sk);
181 if (l->head)
182 l2cap_pi(l->head)->prev_c = sk;
184 l2cap_pi(sk)->next_c = l->head;
185 l2cap_pi(sk)->prev_c = NULL;
186 l->head = sk;
189 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
191 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
193 write_lock_bh(&l->lock);
194 if (sk == l->head)
195 l->head = next;
197 if (next)
198 l2cap_pi(next)->prev_c = prev;
199 if (prev)
200 l2cap_pi(prev)->next_c = next;
201 write_unlock_bh(&l->lock);
203 __sock_put(sk);
206 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
208 struct l2cap_chan_list *l = &conn->chan_list;
210 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
212 l2cap_pi(sk)->conn = conn;
214 if (sk->sk_type == SOCK_SEQPACKET) {
215 /* Alloc CID for connection-oriented socket */
216 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
217 } else if (sk->sk_type == SOCK_DGRAM) {
218 /* Connectionless socket */
219 l2cap_pi(sk)->scid = 0x0002;
220 l2cap_pi(sk)->dcid = 0x0002;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
222 } else {
223 /* Raw socket can send/recv signalling messages only */
224 l2cap_pi(sk)->scid = 0x0001;
225 l2cap_pi(sk)->dcid = 0x0001;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
229 __l2cap_chan_link(l, sk);
231 if (parent)
232 bt_accept_enqueue(parent, sk);
235 /* Delete channel.
236 * Must be called on the locked socket. */
237 static void l2cap_chan_del(struct sock *sk, int err)
239 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
240 struct sock *parent = bt_sk(sk)->parent;
242 l2cap_sock_clear_timer(sk);
244 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
246 if (conn) {
247 /* Unlink from channel list */
248 l2cap_chan_unlink(&conn->chan_list, sk);
249 l2cap_pi(sk)->conn = NULL;
250 hci_conn_put(conn->hcon);
253 sk->sk_state = BT_CLOSED;
254 sock_set_flag(sk, SOCK_ZAPPED);
256 if (err)
257 sk->sk_err = err;
259 if (parent) {
260 bt_accept_unlink(sk);
261 parent->sk_data_ready(parent, 0);
262 } else
263 sk->sk_state_change(sk);
266 /* Service level security */
267 static inline int l2cap_check_link_mode(struct sock *sk)
269 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
271 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
272 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
273 return hci_conn_encrypt(conn->hcon);
275 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
276 return hci_conn_auth(conn->hcon);
278 return 1;
281 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
283 u8 id;
285 /* Get the next available identifier.
286 * 1 - 128 are used by kernel.
287 * 129 - 199 are reserved.
288 * 200 - 254 are used by utilities like l2ping, etc.
291 spin_lock_bh(&conn->lock);
293 if (++conn->tx_ident > 128)
294 conn->tx_ident = 1;
296 id = conn->tx_ident;
298 spin_unlock_bh(&conn->lock);
300 return id;
303 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
305 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
307 BT_DBG("code 0x%2.2x", code);
309 if (!skb)
310 return -ENOMEM;
312 return hci_send_acl(conn->hcon, skb, 0);
315 static void l2cap_do_start(struct sock *sk)
317 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
320 if (l2cap_check_link_mode(sk)) {
321 struct l2cap_conn_req req;
322 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
323 req.psm = l2cap_pi(sk)->psm;
325 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
327 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
328 L2CAP_CONN_REQ, sizeof(req), &req);
330 } else {
331 struct l2cap_info_req req;
332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
335 conn->info_ident = l2cap_get_ident(conn);
337 mod_timer(&conn->info_timer, jiffies +
338 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
340 l2cap_send_cmd(conn, conn->info_ident,
341 L2CAP_INFO_REQ, sizeof(req), &req);
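/*
 * Rough sketch of the signalling exchange driven by l2cap_do_start()
 * (illustrative only, initiator on the left):
 *
 *	L2CAP_INFO_REQ (feat mask)  -->        (once per ACL link)
 *	                            <--  L2CAP_INFO_RSP
 *	L2CAP_CONN_REQ (psm, scid)  -->
 *	                            <--  L2CAP_CONN_RSP (dcid, result, status)
 *	L2CAP_CONF_REQ              -->
 *	                            <--  L2CAP_CONF_RSP
 *	                            <--  L2CAP_CONF_REQ
 *	L2CAP_CONF_RSP              -->
 *
 * The channel only enters BT_CONNECTED once configuration has completed
 * in both directions (see l2cap_config_req() and l2cap_config_rsp()).
 */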
345 /* ---- L2CAP connections ---- */
346 static void l2cap_conn_start(struct l2cap_conn *conn)
348 struct l2cap_chan_list *l = &conn->chan_list;
349 struct sock *sk;
351 BT_DBG("conn %p", conn);
353 read_lock(&l->lock);
355 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
356 bh_lock_sock(sk);
358 if (sk->sk_type != SOCK_SEQPACKET) {
359 bh_unlock_sock(sk);
360 continue;
363 if (sk->sk_state == BT_CONNECT) {
364 if (l2cap_check_link_mode(sk)) {
365 struct l2cap_conn_req req;
366 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
367 req.psm = l2cap_pi(sk)->psm;
369 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
371 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
372 L2CAP_CONN_REQ, sizeof(req), &req);
374 } else if (sk->sk_state == BT_CONNECT2) {
375 struct l2cap_conn_rsp rsp;
376 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
377 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
379 if (l2cap_check_link_mode(sk)) {
380 sk->sk_state = BT_CONFIG;
381 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
382 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
383 } else {
384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
388 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
389 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
392 bh_unlock_sock(sk);
395 read_unlock(&l->lock);
398 static void l2cap_conn_ready(struct l2cap_conn *conn)
400 struct l2cap_chan_list *l = &conn->chan_list;
401 struct sock *sk;
403 BT_DBG("conn %p", conn);
405 read_lock(&l->lock);
407 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
408 bh_lock_sock(sk);
410 if (sk->sk_type != SOCK_SEQPACKET) {
411 l2cap_sock_clear_timer(sk);
412 sk->sk_state = BT_CONNECTED;
413 sk->sk_state_change(sk);
414 } else if (sk->sk_state == BT_CONNECT)
415 l2cap_do_start(sk);
417 bh_unlock_sock(sk);
420 read_unlock(&l->lock);
423 /* Notify sockets that we cannot guarantee reliability anymore */
424 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
426 struct l2cap_chan_list *l = &conn->chan_list;
427 struct sock *sk;
429 BT_DBG("conn %p", conn);
431 read_lock(&l->lock);
433 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
434 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
435 sk->sk_err = err;
438 read_unlock(&l->lock);
441 static void l2cap_info_timeout(unsigned long arg)
443 struct l2cap_conn *conn = (void *) arg;
445 conn->info_ident = 0;
447 l2cap_conn_start(conn);
450 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
452 struct l2cap_conn *conn = hcon->l2cap_data;
454 if (conn || status)
455 return conn;
457 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
458 if (!conn)
459 return NULL;
461 hcon->l2cap_data = conn;
462 conn->hcon = hcon;
464 BT_DBG("hcon %p conn %p", hcon, conn);
466 conn->mtu = hcon->hdev->acl_mtu;
467 conn->src = &hcon->hdev->bdaddr;
468 conn->dst = &hcon->dst;
470 conn->feat_mask = 0;
472 setup_timer(&conn->info_timer, l2cap_info_timeout,
473 (unsigned long) conn);
475 spin_lock_init(&conn->lock);
476 rwlock_init(&conn->chan_list.lock);
478 return conn;
481 static void l2cap_conn_del(struct hci_conn *hcon, int err)
483 struct l2cap_conn *conn = hcon->l2cap_data;
484 struct sock *sk;
486 if (!conn)
487 return;
489 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
491 if (conn->rx_skb)
492 kfree_skb(conn->rx_skb);
494 /* Kill channels */
495 while ((sk = conn->chan_list.head)) {
496 bh_lock_sock(sk);
497 l2cap_chan_del(sk, err);
498 bh_unlock_sock(sk);
499 l2cap_sock_kill(sk);
502 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
503 del_timer_sync(&conn->info_timer);
505 hcon->l2cap_data = NULL;
506 kfree(conn);
509 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
511 struct l2cap_chan_list *l = &conn->chan_list;
512 write_lock_bh(&l->lock);
513 __l2cap_chan_add(conn, sk, parent);
514 write_unlock_bh(&l->lock);
517 /* ---- Socket interface ---- */
518 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
520 struct sock *sk;
521 struct hlist_node *node;
522 sk_for_each(sk, node, &l2cap_sk_list.head)
523 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
524 goto found;
525 sk = NULL;
526 found:
527 return sk;
530 /* Find socket with psm and source bdaddr.
531 * Returns closest match.
533 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
535 struct sock *sk = NULL, *sk1 = NULL;
536 struct hlist_node *node;
538 sk_for_each(sk, node, &l2cap_sk_list.head) {
539 if (state && sk->sk_state != state)
540 continue;
542 if (l2cap_pi(sk)->psm == psm) {
543 /* Exact match. */
544 if (!bacmp(&bt_sk(sk)->src, src))
545 break;
547 /* Closest match */
548 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
549 sk1 = sk;
552 return node ? sk : sk1;
555 /* Find socket with given address (psm, src).
556 * Returns locked socket */
557 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
559 struct sock *s;
560 read_lock(&l2cap_sk_list.lock);
561 s = __l2cap_get_sock_by_psm(state, psm, src);
562 if (s) bh_lock_sock(s);
563 read_unlock(&l2cap_sk_list.lock);
564 return s;
567 static void l2cap_sock_destruct(struct sock *sk)
569 BT_DBG("sk %p", sk);
571 skb_queue_purge(&sk->sk_receive_queue);
572 skb_queue_purge(&sk->sk_write_queue);
575 static void l2cap_sock_cleanup_listen(struct sock *parent)
577 struct sock *sk;
579 BT_DBG("parent %p", parent);
581 /* Close not yet accepted channels */
582 while ((sk = bt_accept_dequeue(parent, NULL)))
583 l2cap_sock_close(sk);
585 parent->sk_state = BT_CLOSED;
586 sock_set_flag(parent, SOCK_ZAPPED);
589 /* Kill socket (only if zapped and orphan)
590 * Must be called on unlocked socket.
592 static void l2cap_sock_kill(struct sock *sk)
594 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
595 return;
597 BT_DBG("sk %p state %d", sk, sk->sk_state);
599 /* Kill poor orphan */
600 bt_sock_unlink(&l2cap_sk_list, sk);
601 sock_set_flag(sk, SOCK_DEAD);
602 sock_put(sk);
605 static void __l2cap_sock_close(struct sock *sk, int reason)
607 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
609 switch (sk->sk_state) {
610 case BT_LISTEN:
611 l2cap_sock_cleanup_listen(sk);
612 break;
614 case BT_CONNECTED:
615 case BT_CONFIG:
616 case BT_CONNECT2:
617 if (sk->sk_type == SOCK_SEQPACKET) {
618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
619 struct l2cap_disconn_req req;
621 sk->sk_state = BT_DISCONN;
622 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
624 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
625 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
626 l2cap_send_cmd(conn, l2cap_get_ident(conn),
627 L2CAP_DISCONN_REQ, sizeof(req), &req);
628 } else
629 l2cap_chan_del(sk, reason);
630 break;
632 case BT_CONNECT:
633 case BT_DISCONN:
634 l2cap_chan_del(sk, reason);
635 break;
637 default:
638 sock_set_flag(sk, SOCK_ZAPPED);
639 break;
643 /* Must be called on unlocked socket. */
644 static void l2cap_sock_close(struct sock *sk)
646 l2cap_sock_clear_timer(sk);
647 lock_sock(sk);
648 __l2cap_sock_close(sk, ECONNRESET);
649 release_sock(sk);
650 l2cap_sock_kill(sk);
653 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
655 struct l2cap_pinfo *pi = l2cap_pi(sk);
657 BT_DBG("sk %p", sk);
659 if (parent) {
660 sk->sk_type = parent->sk_type;
661 pi->imtu = l2cap_pi(parent)->imtu;
662 pi->omtu = l2cap_pi(parent)->omtu;
663 pi->link_mode = l2cap_pi(parent)->link_mode;
664 } else {
665 pi->imtu = L2CAP_DEFAULT_MTU;
666 pi->omtu = 0;
667 pi->link_mode = 0;
670 /* Default config options */
671 pi->conf_len = 0;
672 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
675 static struct proto l2cap_proto = {
676 .name = "L2CAP",
677 .owner = THIS_MODULE,
678 .obj_size = sizeof(struct l2cap_pinfo)
681 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
683 struct sock *sk;
685 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
686 if (!sk)
687 return NULL;
689 sock_init_data(sock, sk);
690 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
692 sk->sk_destruct = l2cap_sock_destruct;
693 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
695 sock_reset_flag(sk, SOCK_ZAPPED);
697 sk->sk_protocol = proto;
698 sk->sk_state = BT_OPEN;
700 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
702 bt_sock_link(&l2cap_sk_list, sk);
703 return sk;
706 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
708 struct sock *sk;
710 BT_DBG("sock %p", sock);
712 sock->state = SS_UNCONNECTED;
714 if (sock->type != SOCK_SEQPACKET &&
715 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
716 return -ESOCKTNOSUPPORT;
718 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
719 return -EPERM;
721 sock->ops = &l2cap_sock_ops;
723 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
724 if (!sk)
725 return -ENOMEM;
727 l2cap_sock_init(sk, NULL);
728 return 0;
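/*
 * Illustrative userspace use of the socket layer implemented here
 * (assumes the BlueZ <bluetooth/bluetooth.h> and <bluetooth/l2cap.h>
 * headers; address and PSM are example values, error handling omitted):
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * connect() ends up in l2cap_sock_connect() below and blocks until the
 * channel reaches BT_CONNECTED or the sk_sndtimeo timer expires.
 */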
731 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
733 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
734 struct sock *sk = sock->sk;
735 int err = 0;
737 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
739 if (!addr || addr->sa_family != AF_BLUETOOTH)
740 return -EINVAL;
742 lock_sock(sk);
744 if (sk->sk_state != BT_OPEN) {
745 err = -EBADFD;
746 goto done;
749 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
750 !capable(CAP_NET_BIND_SERVICE)) {
751 err = -EACCES;
752 goto done;
755 write_lock_bh(&l2cap_sk_list.lock);
757 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
758 err = -EADDRINUSE;
759 } else {
760 /* Save source address */
761 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
762 l2cap_pi(sk)->psm = la->l2_psm;
763 l2cap_pi(sk)->sport = la->l2_psm;
764 sk->sk_state = BT_BOUND;
767 write_unlock_bh(&l2cap_sk_list.lock);
769 done:
770 release_sock(sk);
771 return err;
774 static int l2cap_do_connect(struct sock *sk)
776 bdaddr_t *src = &bt_sk(sk)->src;
777 bdaddr_t *dst = &bt_sk(sk)->dst;
778 struct l2cap_conn *conn;
779 struct hci_conn *hcon;
780 struct hci_dev *hdev;
781 __u8 auth_type;
782 int err = 0;
784 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
786 if (!(hdev = hci_get_route(dst, src)))
787 return -EHOSTUNREACH;
789 hci_dev_lock_bh(hdev);
791 err = -ENOMEM;
793 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
794 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
795 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
796 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
797 auth_type = HCI_AT_NO_BONDING_MITM;
798 else
799 auth_type = HCI_AT_GENERAL_BONDING_MITM;
800 } else {
801 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
802 auth_type = HCI_AT_NO_BONDING;
803 else
804 auth_type = HCI_AT_GENERAL_BONDING;
807 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
808 if (!hcon)
809 goto done;
811 conn = l2cap_conn_add(hcon, 0);
812 if (!conn) {
813 hci_conn_put(hcon);
814 goto done;
817 err = 0;
819 /* Update source addr of the socket */
820 bacpy(src, conn->src);
822 l2cap_chan_add(conn, sk, NULL);
824 sk->sk_state = BT_CONNECT;
825 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
827 if (hcon->state == BT_CONNECTED) {
828 if (sk->sk_type != SOCK_SEQPACKET) {
829 l2cap_sock_clear_timer(sk);
830 sk->sk_state = BT_CONNECTED;
831 } else
832 l2cap_do_start(sk);
835 done:
836 hci_dev_unlock_bh(hdev);
837 hci_dev_put(hdev);
838 return err;
841 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
843 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
844 struct sock *sk = sock->sk;
845 int err = 0;
847 lock_sock(sk);
849 BT_DBG("sk %p", sk);
851 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
852 err = -EINVAL;
853 goto done;
856 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
857 err = -EINVAL;
858 goto done;
861 switch(sk->sk_state) {
862 case BT_CONNECT:
863 case BT_CONNECT2:
864 case BT_CONFIG:
865 /* Already connecting */
866 goto wait;
868 case BT_CONNECTED:
869 /* Already connected */
870 goto done;
872 case BT_OPEN:
873 case BT_BOUND:
874 /* Can connect */
875 break;
877 default:
878 err = -EBADFD;
879 goto done;
882 /* Set destination address and psm */
883 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
884 l2cap_pi(sk)->psm = la->l2_psm;
886 if ((err = l2cap_do_connect(sk)))
887 goto done;
889 wait:
890 err = bt_sock_wait_state(sk, BT_CONNECTED,
891 sock_sndtimeo(sk, flags & O_NONBLOCK));
892 done:
893 release_sock(sk);
894 return err;
897 static int l2cap_sock_listen(struct socket *sock, int backlog)
899 struct sock *sk = sock->sk;
900 int err = 0;
902 BT_DBG("sk %p backlog %d", sk, backlog);
904 lock_sock(sk);
906 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
907 err = -EBADFD;
908 goto done;
911 if (!l2cap_pi(sk)->psm) {
912 bdaddr_t *src = &bt_sk(sk)->src;
913 u16 psm;
915 err = -EINVAL;
917 write_lock_bh(&l2cap_sk_list.lock);
919 for (psm = 0x1001; psm < 0x1100; psm += 2)
920 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
921 l2cap_pi(sk)->psm = htobs(psm);
922 l2cap_pi(sk)->sport = htobs(psm);
923 err = 0;
924 break;
927 write_unlock_bh(&l2cap_sk_list.lock);
929 if (err < 0)
930 goto done;
933 sk->sk_max_ack_backlog = backlog;
934 sk->sk_ack_backlog = 0;
935 sk->sk_state = BT_LISTEN;
937 done:
938 release_sock(sk);
939 return err;
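/*
 * Illustrative server-side counterpart (userspace, example values,
 * error handling omitted): bind to a PSM, listen, accept channels.
 *
 *	struct sockaddr_l2 loc = { 0 }, rem = { 0 };
 *	socklen_t alen = sizeof(rem);
 *	int s = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	loc.l2_family = AF_BLUETOOTH;
 *	loc.l2_psm    = htobs(0x1001);
 *	bacpy(&loc.l2_bdaddr, BDADDR_ANY);
 *
 *	bind(s, (struct sockaddr *) &loc, sizeof(loc));
 *	listen(s, 5);
 *	int c = accept(s, (struct sockaddr *) &rem, &alen);
 *
 * If no PSM was bound beforehand, l2cap_sock_listen() above picks a
 * free odd PSM from the dynamic range 0x1001-0x10ff automatically.
 */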
942 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
944 DECLARE_WAITQUEUE(wait, current);
945 struct sock *sk = sock->sk, *nsk;
946 long timeo;
947 int err = 0;
949 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
951 if (sk->sk_state != BT_LISTEN) {
952 err = -EBADFD;
953 goto done;
956 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
958 BT_DBG("sk %p timeo %ld", sk, timeo);
960 /* Wait for an incoming connection. (wake-one). */
961 add_wait_queue_exclusive(sk->sk_sleep, &wait);
962 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
963 set_current_state(TASK_INTERRUPTIBLE);
964 if (!timeo) {
965 err = -EAGAIN;
966 break;
969 release_sock(sk);
970 timeo = schedule_timeout(timeo);
971 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
973 if (sk->sk_state != BT_LISTEN) {
974 err = -EBADFD;
975 break;
978 if (signal_pending(current)) {
979 err = sock_intr_errno(timeo);
980 break;
983 set_current_state(TASK_RUNNING);
984 remove_wait_queue(sk->sk_sleep, &wait);
986 if (err)
987 goto done;
989 newsock->state = SS_CONNECTED;
991 BT_DBG("new socket %p", nsk);
993 done:
994 release_sock(sk);
995 return err;
998 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1000 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1001 struct sock *sk = sock->sk;
1003 BT_DBG("sock %p, sk %p", sock, sk);
1005 addr->sa_family = AF_BLUETOOTH;
1006 *len = sizeof(struct sockaddr_l2);
1008 if (peer)
1009 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1010 else
1011 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1013 la->l2_psm = l2cap_pi(sk)->psm;
1014 return 0;
1017 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1019 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1020 struct sk_buff *skb, **frag;
1021 int err, hlen, count, sent=0;
1022 struct l2cap_hdr *lh;
1024 BT_DBG("sk %p len %d", sk, len);
1026 /* First fragment (with L2CAP header) */
1027 if (sk->sk_type == SOCK_DGRAM)
1028 hlen = L2CAP_HDR_SIZE + 2;
1029 else
1030 hlen = L2CAP_HDR_SIZE;
1032 count = min_t(unsigned int, (conn->mtu - hlen), len);
1034 skb = bt_skb_send_alloc(sk, hlen + count,
1035 msg->msg_flags & MSG_DONTWAIT, &err);
1036 if (!skb)
1037 return err;
1039 /* Create L2CAP header */
1040 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1041 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1042 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1044 if (sk->sk_type == SOCK_DGRAM)
1045 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1047 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1048 err = -EFAULT;
1049 goto fail;
1052 sent += count;
1053 len -= count;
1055 /* Continuation fragments (no L2CAP header) */
1056 frag = &skb_shinfo(skb)->frag_list;
1057 while (len) {
1058 count = min_t(unsigned int, conn->mtu, len);
1060 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1061 if (!*frag)
1062 goto fail;
1064 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1065 err = -EFAULT;
1066 goto fail;
1069 sent += count;
1070 len -= count;
1072 frag = &(*frag)->next;
1075 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1076 goto fail;
1078 return sent;
1080 fail:
1081 kfree_skb(skb);
1082 return err;
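/*
 * Worked example for the fragmentation above (numbers are hypothetical):
 * with conn->mtu = 192 and a 500 byte SOCK_SEQPACKET write, hlen is the
 * 4 byte L2CAP header, so the payload is split as
 *
 *	fragment 0: 4 byte header + 188 bytes of payload (192 bytes total)
 *	fragment 1: 192 bytes of payload
 *	fragment 2: 120 bytes of payload
 *
 * Continuation fragments are chained on skb_shinfo(skb)->frag_list and
 * hci_send_acl() turns the chain into ACL start/continuation packets.
 */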
1085 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1087 struct sock *sk = sock->sk;
1088 int err = 0;
1090 BT_DBG("sock %p, sk %p", sock, sk);
1092 err = sock_error(sk);
1093 if (err)
1094 return err;
1096 if (msg->msg_flags & MSG_OOB)
1097 return -EOPNOTSUPP;
1099 /* Check outgoing MTU */
1100 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1101 return -EINVAL;
1103 lock_sock(sk);
1105 if (sk->sk_state == BT_CONNECTED)
1106 err = l2cap_do_send(sk, msg, len);
1107 else
1108 err = -ENOTCONN;
1110 release_sock(sk);
1111 return err;
1114 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1116 struct sock *sk = sock->sk;
1117 struct l2cap_options opts;
1118 int err = 0, len;
1119 u32 opt;
1121 BT_DBG("sk %p", sk);
1123 lock_sock(sk);
1125 switch (optname) {
1126 case L2CAP_OPTIONS:
1127 opts.imtu = l2cap_pi(sk)->imtu;
1128 opts.omtu = l2cap_pi(sk)->omtu;
1129 opts.flush_to = l2cap_pi(sk)->flush_to;
1130 opts.mode = L2CAP_MODE_BASIC;
1132 len = min_t(unsigned int, sizeof(opts), optlen);
1133 if (copy_from_user((char *) &opts, optval, len)) {
1134 err = -EFAULT;
1135 break;
1138 l2cap_pi(sk)->imtu = opts.imtu;
1139 l2cap_pi(sk)->omtu = opts.omtu;
1140 break;
1142 case L2CAP_LM:
1143 if (get_user(opt, (u32 __user *) optval)) {
1144 err = -EFAULT;
1145 break;
1148 l2cap_pi(sk)->link_mode = opt;
1149 break;
1151 default:
1152 err = -ENOPROTOOPT;
1153 break;
1156 release_sock(sk);
1157 return err;
1160 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1162 struct sock *sk = sock->sk;
1163 struct l2cap_options opts;
1164 struct l2cap_conninfo cinfo;
1165 int len, err = 0;
1167 BT_DBG("sk %p", sk);
1169 if (get_user(len, optlen))
1170 return -EFAULT;
1172 lock_sock(sk);
1174 switch (optname) {
1175 case L2CAP_OPTIONS:
1176 opts.imtu = l2cap_pi(sk)->imtu;
1177 opts.omtu = l2cap_pi(sk)->omtu;
1178 opts.flush_to = l2cap_pi(sk)->flush_to;
1179 opts.mode = L2CAP_MODE_BASIC;
1181 len = min_t(unsigned int, len, sizeof(opts));
1182 if (copy_to_user(optval, (char *) &opts, len))
1183 err = -EFAULT;
1185 break;
1187 case L2CAP_LM:
1188 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1189 err = -EFAULT;
1190 break;
1192 case L2CAP_CONNINFO:
1193 if (sk->sk_state != BT_CONNECTED) {
1194 err = -ENOTCONN;
1195 break;
1198 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1199 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1201 len = min_t(unsigned int, len, sizeof(cinfo));
1202 if (copy_to_user(optval, (char *) &cinfo, len))
1203 err = -EFAULT;
1205 break;
1207 default:
1208 err = -ENOPROTOOPT;
1209 break;
1212 release_sock(sk);
1213 return err;
1216 static int l2cap_sock_shutdown(struct socket *sock, int how)
1218 struct sock *sk = sock->sk;
1219 int err = 0;
1221 BT_DBG("sock %p, sk %p", sock, sk);
1223 if (!sk)
1224 return 0;
1226 lock_sock(sk);
1227 if (!sk->sk_shutdown) {
1228 sk->sk_shutdown = SHUTDOWN_MASK;
1229 l2cap_sock_clear_timer(sk);
1230 __l2cap_sock_close(sk, 0);
1232 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1233 err = bt_sock_wait_state(sk, BT_CLOSED,
1234 sk->sk_lingertime);
1236 release_sock(sk);
1237 return err;
1240 static int l2cap_sock_release(struct socket *sock)
1242 struct sock *sk = sock->sk;
1243 int err;
1245 BT_DBG("sock %p, sk %p", sock, sk);
1247 if (!sk)
1248 return 0;
1250 err = l2cap_sock_shutdown(sock, 2);
1252 sock_orphan(sk);
1253 l2cap_sock_kill(sk);
1254 return err;
1257 static void l2cap_chan_ready(struct sock *sk)
1259 struct sock *parent = bt_sk(sk)->parent;
1261 BT_DBG("sk %p, parent %p", sk, parent);
1263 l2cap_pi(sk)->conf_state = 0;
1264 l2cap_sock_clear_timer(sk);
1266 if (!parent) {
1267 /* Outgoing channel.
1268 * Wake up socket sleeping on connect.
1270 sk->sk_state = BT_CONNECTED;
1271 sk->sk_state_change(sk);
1272 } else {
1273 /* Incoming channel.
1274 * Wake up socket sleeping on accept.
1276 parent->sk_data_ready(parent, 0);
1279 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1280 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1281 hci_conn_change_link_key(conn->hcon);
1285 /* Copy frame to all raw sockets on that connection */
1286 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1288 struct l2cap_chan_list *l = &conn->chan_list;
1289 struct sk_buff *nskb;
1290 struct sock * sk;
1292 BT_DBG("conn %p", conn);
1294 read_lock(&l->lock);
1295 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1296 if (sk->sk_type != SOCK_RAW)
1297 continue;
1299 /* Don't send frame to the socket it came from */
1300 if (skb->sk == sk)
1301 continue;
1303 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1304 continue;
1306 if (sock_queue_rcv_skb(sk, nskb))
1307 kfree_skb(nskb);
1309 read_unlock(&l->lock);
1312 /* ---- L2CAP signalling commands ---- */
1313 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1314 u8 code, u8 ident, u16 dlen, void *data)
1316 struct sk_buff *skb, **frag;
1317 struct l2cap_cmd_hdr *cmd;
1318 struct l2cap_hdr *lh;
1319 int len, count;
1321 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1323 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1324 count = min_t(unsigned int, conn->mtu, len);
1326 skb = bt_skb_alloc(count, GFP_ATOMIC);
1327 if (!skb)
1328 return NULL;
1330 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1331 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1332 lh->cid = cpu_to_le16(0x0001);
1334 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1335 cmd->code = code;
1336 cmd->ident = ident;
1337 cmd->len = cpu_to_le16(dlen);
1339 if (dlen) {
1340 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1341 memcpy(skb_put(skb, count), data, count);
1342 data += count;
1345 len -= skb->len;
1347 /* Continuation fragments (no L2CAP header) */
1348 frag = &skb_shinfo(skb)->frag_list;
1349 while (len) {
1350 count = min_t(unsigned int, conn->mtu, len);
1352 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1353 if (!*frag)
1354 goto fail;
1356 memcpy(skb_put(*frag, count), data, count);
1358 len -= count;
1359 data += count;
1361 frag = &(*frag)->next;
1364 return skb;
1366 fail:
1367 kfree_skb(skb);
1368 return NULL;
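/*
 * For reference, the wire layout of a signalling packet built above
 * (all multi-byte fields are little endian):
 *
 *	struct l2cap_hdr:      len (2 bytes), cid = 0x0001 (2 bytes)
 *	struct l2cap_cmd_hdr:  code (1 byte), ident (1 byte), len (2 bytes)
 *	data:                  dlen bytes
 *
 * For example, an echo request carrying 4 bytes of data has
 * lh->len = 8 (command header plus data) and is 12 bytes on the wire.
 */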
1371 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1373 struct l2cap_conf_opt *opt = *ptr;
1374 int len;
1376 len = L2CAP_CONF_OPT_SIZE + opt->len;
1377 *ptr += len;
1379 *type = opt->type;
1380 *olen = opt->len;
1382 switch (opt->len) {
1383 case 1:
1384 *val = *((u8 *) opt->val);
1385 break;
1387 case 2:
1388 *val = __le16_to_cpu(*((__le16 *) opt->val));
1389 break;
1391 case 4:
1392 *val = __le32_to_cpu(*((__le32 *) opt->val));
1393 break;
1395 default:
1396 *val = (unsigned long) opt->val;
1397 break;
1400 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1401 return len;
1404 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1406 struct l2cap_conf_opt *opt = *ptr;
1408 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1410 opt->type = type;
1411 opt->len = len;
1413 switch (len) {
1414 case 1:
1415 *((u8 *) opt->val) = val;
1416 break;
1418 case 2:
1419 *((__le16 *) opt->val) = cpu_to_le16(val);
1420 break;
1422 case 4:
1423 *((__le32 *) opt->val) = cpu_to_le32(val);
1424 break;
1426 default:
1427 memcpy(opt->val, (void *) val, len);
1428 break;
1431 *ptr += L2CAP_CONF_OPT_SIZE + len;
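/*
 * Configuration options are simple type/length/value triples. As an
 * example (values illustrative), an MTU option asking for 672 bytes is
 * encoded by the helpers above as
 *
 *	type = 0x01 (L2CAP_CONF_MTU), len = 2, val = 672
 *
 * i.e. the bytes 01 02 a0 02 appended at *ptr. Option types with the
 * high bit set (type & 0x80) are hints and may be ignored by the peer.
 */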
1434 static int l2cap_build_conf_req(struct sock *sk, void *data)
1436 struct l2cap_pinfo *pi = l2cap_pi(sk);
1437 struct l2cap_conf_req *req = data;
1438 void *ptr = req->data;
1440 BT_DBG("sk %p", sk);
1442 if (pi->imtu != L2CAP_DEFAULT_MTU)
1443 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1445 /* FIXME: Need actual value of the flush timeout */
1446 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1447 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1449 req->dcid = cpu_to_le16(pi->dcid);
1450 req->flags = cpu_to_le16(0);
1452 return ptr - data;
1455 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1457 struct l2cap_pinfo *pi = l2cap_pi(sk);
1458 struct l2cap_conf_rsp *rsp = data;
1459 void *ptr = rsp->data;
1460 void *req = pi->conf_req;
1461 int len = pi->conf_len;
1462 int type, hint, olen;
1463 unsigned long val;
1464 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1465 u16 mtu = L2CAP_DEFAULT_MTU;
1466 u16 result = L2CAP_CONF_SUCCESS;
1468 BT_DBG("sk %p", sk);
1470 while (len >= L2CAP_CONF_OPT_SIZE) {
1471 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1473 hint = type & 0x80;
1474 type &= 0x7f;
1476 switch (type) {
1477 case L2CAP_CONF_MTU:
1478 mtu = val;
1479 break;
1481 case L2CAP_CONF_FLUSH_TO:
1482 pi->flush_to = val;
1483 break;
1485 case L2CAP_CONF_QOS:
1486 break;
1488 case L2CAP_CONF_RFC:
1489 if (olen == sizeof(rfc))
1490 memcpy(&rfc, (void *) val, olen);
1491 break;
1493 default:
1494 if (hint)
1495 break;
1497 result = L2CAP_CONF_UNKNOWN;
1498 *((u8 *) ptr++) = type;
1499 break;
1503 if (result == L2CAP_CONF_SUCCESS) {
1504 /* Configure output options and let the other side know
1505 * which ones we don't like. */
1507 if (rfc.mode == L2CAP_MODE_BASIC) {
1508 if (mtu < pi->omtu)
1509 result = L2CAP_CONF_UNACCEPT;
1510 else {
1511 pi->omtu = mtu;
1512 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1516 } else {
1517 result = L2CAP_CONF_UNACCEPT;
1519 memset(&rfc, 0, sizeof(rfc));
1520 rfc.mode = L2CAP_MODE_BASIC;
1522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1523 sizeof(rfc), (unsigned long) &rfc);
1527 rsp->scid = cpu_to_le16(pi->dcid);
1528 rsp->result = cpu_to_le16(result);
1529 rsp->flags = cpu_to_le16(0x0000);
1531 return ptr - data;
1534 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1536 struct l2cap_conf_rsp *rsp = data;
1537 void *ptr = rsp->data;
1539 BT_DBG("sk %p", sk);
1541 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1542 rsp->result = cpu_to_le16(result);
1543 rsp->flags = cpu_to_le16(flags);
1545 return ptr - data;
1548 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1550 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1552 if (rej->reason != 0x0000)
1553 return 0;
1555 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1556 cmd->ident == conn->info_ident) {
1557 conn->info_ident = 0;
1558 del_timer(&conn->info_timer);
1559 l2cap_conn_start(conn);
1562 return 0;
1565 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1567 struct l2cap_chan_list *list = &conn->chan_list;
1568 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1569 struct l2cap_conn_rsp rsp;
1570 struct sock *sk, *parent;
1571 int result, status = L2CAP_CS_NO_INFO;
1573 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1574 __le16 psm = req->psm;
1576 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1578 /* Check if we have a socket listening on this PSM */
1579 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1580 if (!parent) {
1581 result = L2CAP_CR_BAD_PSM;
1582 goto sendresp;
1585 /* Check if the ACL is secure enough (if not SDP) */
1586 if (psm != cpu_to_le16(0x0001) &&
1587 !hci_conn_check_link_mode(conn->hcon)) {
1588 result = L2CAP_CR_SEC_BLOCK;
1589 goto response;
1592 result = L2CAP_CR_NO_MEM;
1594 /* Check for backlog size */
1595 if (sk_acceptq_is_full(parent)) {
1596 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1597 goto response;
1600 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1601 if (!sk)
1602 goto response;
1604 write_lock_bh(&list->lock);
1606 /* Check if we already have channel with that dcid */
1607 if (__l2cap_get_chan_by_dcid(list, scid)) {
1608 write_unlock_bh(&list->lock);
1609 sock_set_flag(sk, SOCK_ZAPPED);
1610 l2cap_sock_kill(sk);
1611 goto response;
1614 hci_conn_hold(conn->hcon);
1616 l2cap_sock_init(sk, parent);
1617 bacpy(&bt_sk(sk)->src, conn->src);
1618 bacpy(&bt_sk(sk)->dst, conn->dst);
1619 l2cap_pi(sk)->psm = psm;
1620 l2cap_pi(sk)->dcid = scid;
1622 __l2cap_chan_add(conn, sk, parent);
1623 dcid = l2cap_pi(sk)->scid;
1625 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1627 l2cap_pi(sk)->ident = cmd->ident;
1629 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1630 if (l2cap_check_link_mode(sk)) {
1631 sk->sk_state = BT_CONFIG;
1632 result = L2CAP_CR_SUCCESS;
1633 status = L2CAP_CS_NO_INFO;
1634 } else {
1635 sk->sk_state = BT_CONNECT2;
1636 result = L2CAP_CR_PEND;
1637 status = L2CAP_CS_AUTHEN_PEND;
1639 } else {
1640 sk->sk_state = BT_CONNECT2;
1641 result = L2CAP_CR_PEND;
1642 status = L2CAP_CS_NO_INFO;
1645 write_unlock_bh(&list->lock);
1647 response:
1648 bh_unlock_sock(parent);
1650 sendresp:
1651 rsp.scid = cpu_to_le16(scid);
1652 rsp.dcid = cpu_to_le16(dcid);
1653 rsp.result = cpu_to_le16(result);
1654 rsp.status = cpu_to_le16(status);
1655 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1657 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1658 struct l2cap_info_req info;
1659 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1661 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1662 conn->info_ident = l2cap_get_ident(conn);
1664 mod_timer(&conn->info_timer, jiffies +
1665 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1667 l2cap_send_cmd(conn, conn->info_ident,
1668 L2CAP_INFO_REQ, sizeof(info), &info);
1671 return 0;
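/*
 * Summary of the (result, status) pairs the handler above can put in
 * the connection response:
 *
 *	L2CAP_CR_BAD_PSM                   no listening socket on the PSM
 *	L2CAP_CR_SEC_BLOCK                 ACL link not secure enough (non-SDP)
 *	L2CAP_CR_NO_MEM                    backlog full, allocation failure
 *	                                   or duplicate channel
 *	L2CAP_CR_SUCCESS / CS_NO_INFO      link mode requirements already met
 *	L2CAP_CR_PEND / CS_AUTHEN_PEND     waiting for authentication
 *	L2CAP_CR_PEND / CS_NO_INFO         waiting for the feature mask exchange
 */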
1674 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1676 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1677 u16 scid, dcid, result, status;
1678 struct sock *sk;
1679 u8 req[128];
1681 scid = __le16_to_cpu(rsp->scid);
1682 dcid = __le16_to_cpu(rsp->dcid);
1683 result = __le16_to_cpu(rsp->result);
1684 status = __le16_to_cpu(rsp->status);
1686 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1688 if (scid) {
1689 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1690 return 0;
1691 } else {
1692 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1693 return 0;
1696 switch (result) {
1697 case L2CAP_CR_SUCCESS:
1698 sk->sk_state = BT_CONFIG;
1699 l2cap_pi(sk)->ident = 0;
1700 l2cap_pi(sk)->dcid = dcid;
1701 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1703 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1704 l2cap_build_conf_req(sk, req), req);
1705 break;
1707 case L2CAP_CR_PEND:
1708 break;
1710 default:
1711 l2cap_chan_del(sk, ECONNREFUSED);
1712 break;
1715 bh_unlock_sock(sk);
1716 return 0;
1719 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1721 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1722 u16 dcid, flags;
1723 u8 rsp[64];
1724 struct sock *sk;
1725 int len;
1727 dcid = __le16_to_cpu(req->dcid);
1728 flags = __le16_to_cpu(req->flags);
1730 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1732 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1733 return -ENOENT;
1735 if (sk->sk_state == BT_DISCONN)
1736 goto unlock;
1738 /* Reject if config buffer is too small. */
1739 len = cmd_len - sizeof(*req);
1740 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1741 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1742 l2cap_build_conf_rsp(sk, rsp,
1743 L2CAP_CONF_REJECT, flags), rsp);
1744 goto unlock;
1747 /* Store config. */
1748 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1749 l2cap_pi(sk)->conf_len += len;
1751 if (flags & 0x0001) {
1752 /* Incomplete config. Send empty response. */
1753 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1754 l2cap_build_conf_rsp(sk, rsp,
1755 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1756 goto unlock;
1759 /* Complete config. */
1760 len = l2cap_parse_conf_req(sk, rsp);
1761 if (len < 0)
1762 goto unlock;
1764 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1766 /* Reset config buffer. */
1767 l2cap_pi(sk)->conf_len = 0;
1769 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1770 goto unlock;
1772 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1773 sk->sk_state = BT_CONNECTED;
1774 l2cap_chan_ready(sk);
1775 goto unlock;
1778 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1779 u8 buf[64];
1780 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1781 l2cap_build_conf_req(sk, buf), buf);
1784 unlock:
1785 bh_unlock_sock(sk);
1786 return 0;
1789 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1791 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1792 u16 scid, flags, result;
1793 struct sock *sk;
1795 scid = __le16_to_cpu(rsp->scid);
1796 flags = __le16_to_cpu(rsp->flags);
1797 result = __le16_to_cpu(rsp->result);
1799 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1801 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1802 return 0;
1804 switch (result) {
1805 case L2CAP_CONF_SUCCESS:
1806 break;
1808 case L2CAP_CONF_UNACCEPT:
1809 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1810 char req[128];
1811 /* It does not make sense to adjust L2CAP parameters
1812 * that are currently defined in the spec. We simply
1813 * resend the config request that we sent earlier. It is
1814 * stupid, but it helps qualification testing which
1815 * expects at least some response from us. */
1816 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1817 l2cap_build_conf_req(sk, req), req);
1818 goto done;
1821 default:
1822 sk->sk_state = BT_DISCONN;
1823 sk->sk_err = ECONNRESET;
1824 l2cap_sock_set_timer(sk, HZ * 5);
1826 struct l2cap_disconn_req req;
1827 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1828 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1829 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1830 L2CAP_DISCONN_REQ, sizeof(req), &req);
1832 goto done;
1835 if (flags & 0x01)
1836 goto done;
1838 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1840 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1841 sk->sk_state = BT_CONNECTED;
1842 l2cap_chan_ready(sk);
1845 done:
1846 bh_unlock_sock(sk);
1847 return 0;
1850 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1852 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1853 struct l2cap_disconn_rsp rsp;
1854 u16 dcid, scid;
1855 struct sock *sk;
1857 scid = __le16_to_cpu(req->scid);
1858 dcid = __le16_to_cpu(req->dcid);
1860 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1862 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1863 return 0;
1865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1866 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1867 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1869 sk->sk_shutdown = SHUTDOWN_MASK;
1871 l2cap_chan_del(sk, ECONNRESET);
1872 bh_unlock_sock(sk);
1874 l2cap_sock_kill(sk);
1875 return 0;
1878 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1880 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1881 u16 dcid, scid;
1882 struct sock *sk;
1884 scid = __le16_to_cpu(rsp->scid);
1885 dcid = __le16_to_cpu(rsp->dcid);
1887 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1889 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1890 return 0;
1892 l2cap_chan_del(sk, 0);
1893 bh_unlock_sock(sk);
1895 l2cap_sock_kill(sk);
1896 return 0;
1899 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1901 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1902 u16 type;
1904 type = __le16_to_cpu(req->type);
1906 BT_DBG("type 0x%4.4x", type);
1908 if (type == L2CAP_IT_FEAT_MASK) {
1909 u8 buf[8];
1910 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1911 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1912 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1913 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1914 l2cap_send_cmd(conn, cmd->ident,
1915 L2CAP_INFO_RSP, sizeof(buf), buf);
1916 } else {
1917 struct l2cap_info_rsp rsp;
1918 rsp.type = cpu_to_le16(type);
1919 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1920 l2cap_send_cmd(conn, cmd->ident,
1921 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1924 return 0;
1927 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1929 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1930 u16 type, result;
1932 type = __le16_to_cpu(rsp->type);
1933 result = __le16_to_cpu(rsp->result);
1935 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1937 conn->info_ident = 0;
1939 del_timer(&conn->info_timer);
1941 if (type == L2CAP_IT_FEAT_MASK)
1942 conn->feat_mask = get_unaligned_le32(rsp->data);
1944 l2cap_conn_start(conn);
1946 return 0;
1949 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1951 u8 *data = skb->data;
1952 int len = skb->len;
1953 struct l2cap_cmd_hdr cmd;
1954 int err = 0;
1956 l2cap_raw_recv(conn, skb);
1958 while (len >= L2CAP_CMD_HDR_SIZE) {
1959 u16 cmd_len;
1960 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1961 data += L2CAP_CMD_HDR_SIZE;
1962 len -= L2CAP_CMD_HDR_SIZE;
1964 cmd_len = le16_to_cpu(cmd.len);
1966 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1968 if (cmd_len > len || !cmd.ident) {
1969 BT_DBG("corrupted command");
1970 break;
1973 switch (cmd.code) {
1974 case L2CAP_COMMAND_REJ:
1975 l2cap_command_rej(conn, &cmd, data);
1976 break;
1978 case L2CAP_CONN_REQ:
1979 err = l2cap_connect_req(conn, &cmd, data);
1980 break;
1982 case L2CAP_CONN_RSP:
1983 err = l2cap_connect_rsp(conn, &cmd, data);
1984 break;
1986 case L2CAP_CONF_REQ:
1987 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1988 break;
1990 case L2CAP_CONF_RSP:
1991 err = l2cap_config_rsp(conn, &cmd, data);
1992 break;
1994 case L2CAP_DISCONN_REQ:
1995 err = l2cap_disconnect_req(conn, &cmd, data);
1996 break;
1998 case L2CAP_DISCONN_RSP:
1999 err = l2cap_disconnect_rsp(conn, &cmd, data);
2000 break;
2002 case L2CAP_ECHO_REQ:
2003 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2004 break;
2006 case L2CAP_ECHO_RSP:
2007 break;
2009 case L2CAP_INFO_REQ:
2010 err = l2cap_information_req(conn, &cmd, data);
2011 break;
2013 case L2CAP_INFO_RSP:
2014 err = l2cap_information_rsp(conn, &cmd, data);
2015 break;
2017 default:
2018 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2019 err = -EINVAL;
2020 break;
2023 if (err) {
2024 struct l2cap_cmd_rej rej;
2025 BT_DBG("error %d", err);
2027 /* FIXME: Map err to a valid reason */
2028 rej.reason = cpu_to_le16(0);
2029 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2032 data += cmd_len;
2033 len -= cmd_len;
2036 kfree_skb(skb);
2039 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2041 struct sock *sk;
2043 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2044 if (!sk) {
2045 BT_DBG("unknown cid 0x%4.4x", cid);
2046 goto drop;
2049 BT_DBG("sk %p, len %d", sk, skb->len);
2051 if (sk->sk_state != BT_CONNECTED)
2052 goto drop;
2054 if (l2cap_pi(sk)->imtu < skb->len)
2055 goto drop;
2057 /* If the socket receive buffer overflows we drop data here,
2058 * which is *bad* because L2CAP has to be reliable.
2059 * But we don't have any other choice. L2CAP doesn't
2060 * provide a flow control mechanism. */
2062 if (!sock_queue_rcv_skb(sk, skb))
2063 goto done;
2065 drop:
2066 kfree_skb(skb);
2068 done:
2069 if (sk)
2070 bh_unlock_sock(sk);
2072 return 0;
2075 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2077 struct sock *sk;
2079 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2080 if (!sk)
2081 goto drop;
2083 BT_DBG("sk %p, len %d", sk, skb->len);
2085 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2086 goto drop;
2088 if (l2cap_pi(sk)->imtu < skb->len)
2089 goto drop;
2091 if (!sock_queue_rcv_skb(sk, skb))
2092 goto done;
2094 drop:
2095 kfree_skb(skb);
2097 done:
2098 if (sk) bh_unlock_sock(sk);
2099 return 0;
2102 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2104 struct l2cap_hdr *lh = (void *) skb->data;
2105 u16 cid, len;
2106 __le16 psm;
2108 skb_pull(skb, L2CAP_HDR_SIZE);
2109 cid = __le16_to_cpu(lh->cid);
2110 len = __le16_to_cpu(lh->len);
2112 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2114 switch (cid) {
2115 case 0x0001:
2116 l2cap_sig_channel(conn, skb);
2117 break;
2119 case 0x0002:
2120 psm = get_unaligned((__le16 *) skb->data);
2121 skb_pull(skb, 2);
2122 l2cap_conless_channel(conn, psm, skb);
2123 break;
2125 default:
2126 l2cap_data_channel(conn, cid, skb);
2127 break;
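/*
 * Frame demultiplexing above, for reference: every L2CAP PDU starts
 * with the 4 byte basic header
 *
 *	len (2 bytes, little endian) | cid (2 bytes, little endian) | payload
 *
 * CID 0x0001 carries signalling commands, CID 0x0002 carries
 * connectionless data prefixed with a 2 byte PSM, and any other CID is
 * looked up as a connection-oriented channel.
 */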
2131 /* ---- L2CAP interface with lower layer (HCI) ---- */
2133 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2135 int exact = 0, lm1 = 0, lm2 = 0;
2136 register struct sock *sk;
2137 struct hlist_node *node;
2139 if (type != ACL_LINK)
2140 return 0;
2142 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2144 /* Find listening sockets and check their link_mode */
2145 read_lock(&l2cap_sk_list.lock);
2146 sk_for_each(sk, node, &l2cap_sk_list.head) {
2147 if (sk->sk_state != BT_LISTEN)
2148 continue;
2150 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2151 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2152 exact++;
2153 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2154 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2156 read_unlock(&l2cap_sk_list.lock);
2158 return exact ? lm1 : lm2;
2161 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2163 struct l2cap_conn *conn;
2165 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2167 if (hcon->type != ACL_LINK)
2168 return 0;
2170 if (!status) {
2171 conn = l2cap_conn_add(hcon, status);
2172 if (conn)
2173 l2cap_conn_ready(conn);
2174 } else
2175 l2cap_conn_del(hcon, bt_err(status));
2177 return 0;
2180 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2182 BT_DBG("hcon %p reason %d", hcon, reason);
2184 if (hcon->type != ACL_LINK)
2185 return 0;
2187 l2cap_conn_del(hcon, bt_err(reason));
2189 return 0;
2192 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2194 struct l2cap_chan_list *l;
2195 struct l2cap_conn *conn = hcon->l2cap_data;
2196 struct sock *sk;
2198 if (!conn)
2199 return 0;
2201 l = &conn->chan_list;
2203 BT_DBG("conn %p", conn);
2205 read_lock(&l->lock);
2207 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2208 struct l2cap_pinfo *pi = l2cap_pi(sk);
2210 bh_lock_sock(sk);
2212 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2213 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2214 !status) {
2215 bh_unlock_sock(sk);
2216 continue;
2219 if (sk->sk_state == BT_CONNECT) {
2220 if (!status) {
2221 struct l2cap_conn_req req;
2222 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2223 req.psm = l2cap_pi(sk)->psm;
2225 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2227 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2228 L2CAP_CONN_REQ, sizeof(req), &req);
2229 } else {
2230 l2cap_sock_clear_timer(sk);
2231 l2cap_sock_set_timer(sk, HZ / 10);
2233 } else if (sk->sk_state == BT_CONNECT2) {
2234 struct l2cap_conn_rsp rsp;
2235 __u16 result;
2237 if (!status) {
2238 sk->sk_state = BT_CONFIG;
2239 result = L2CAP_CR_SUCCESS;
2240 } else {
2241 sk->sk_state = BT_DISCONN;
2242 l2cap_sock_set_timer(sk, HZ / 10);
2243 result = L2CAP_CR_SEC_BLOCK;
2246 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2247 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2248 rsp.result = cpu_to_le16(result);
2249 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2250 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2251 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2254 bh_unlock_sock(sk);
2257 read_unlock(&l->lock);
2259 return 0;
2262 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2264 struct l2cap_chan_list *l;
2265 struct l2cap_conn *conn = hcon->l2cap_data;
2266 struct sock *sk;
2268 if (!conn)
2269 return 0;
2271 l = &conn->chan_list;
2273 BT_DBG("conn %p", conn);
2275 read_lock(&l->lock);
2277 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2278 struct l2cap_pinfo *pi = l2cap_pi(sk);
2280 bh_lock_sock(sk);
2282 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2283 (sk->sk_state == BT_CONNECTED ||
2284 sk->sk_state == BT_CONFIG) &&
2285 !status && encrypt == 0x00) {
2286 __l2cap_sock_close(sk, ECONNREFUSED);
2287 bh_unlock_sock(sk);
2288 continue;
2291 if (sk->sk_state == BT_CONNECT) {
2292 if (!status) {
2293 struct l2cap_conn_req req;
2294 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2295 req.psm = l2cap_pi(sk)->psm;
2297 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2299 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2300 L2CAP_CONN_REQ, sizeof(req), &req);
2301 } else {
2302 l2cap_sock_clear_timer(sk);
2303 l2cap_sock_set_timer(sk, HZ / 10);
2305 } else if (sk->sk_state == BT_CONNECT2) {
2306 struct l2cap_conn_rsp rsp;
2307 __u16 result;
2309 if (!status) {
2310 sk->sk_state = BT_CONFIG;
2311 result = L2CAP_CR_SUCCESS;
2312 } else {
2313 sk->sk_state = BT_DISCONN;
2314 l2cap_sock_set_timer(sk, HZ / 10);
2315 result = L2CAP_CR_SEC_BLOCK;
2318 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2319 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2320 rsp.result = cpu_to_le16(result);
2321 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2323 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2326 bh_unlock_sock(sk);
2329 read_unlock(&l->lock);
2331 return 0;
2334 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2336 struct l2cap_conn *conn = hcon->l2cap_data;
2338 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2339 goto drop;
2341 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2343 if (flags & ACL_START) {
2344 struct l2cap_hdr *hdr;
2345 int len;
2347 if (conn->rx_len) {
2348 BT_ERR("Unexpected start frame (len %d)", skb->len);
2349 kfree_skb(conn->rx_skb);
2350 conn->rx_skb = NULL;
2351 conn->rx_len = 0;
2352 l2cap_conn_unreliable(conn, ECOMM);
2355 if (skb->len < 2) {
2356 BT_ERR("Frame is too short (len %d)", skb->len);
2357 l2cap_conn_unreliable(conn, ECOMM);
2358 goto drop;
2361 hdr = (struct l2cap_hdr *) skb->data;
2362 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2364 if (len == skb->len) {
2365 /* Complete frame received */
2366 l2cap_recv_frame(conn, skb);
2367 return 0;
2370 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2372 if (skb->len > len) {
2373 BT_ERR("Frame is too long (len %d, expected len %d)",
2374 skb->len, len);
2375 l2cap_conn_unreliable(conn, ECOMM);
2376 goto drop;
2379 /* Allocate skb for the complete frame (with header) */
2380 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2381 goto drop;
2383 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2384 skb->len);
2385 conn->rx_len = len - skb->len;
2386 } else {
2387 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2389 if (!conn->rx_len) {
2390 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2391 l2cap_conn_unreliable(conn, ECOMM);
2392 goto drop;
2395 if (skb->len > conn->rx_len) {
2396 BT_ERR("Fragment is too long (len %d, expected %d)",
2397 skb->len, conn->rx_len);
2398 kfree_skb(conn->rx_skb);
2399 conn->rx_skb = NULL;
2400 conn->rx_len = 0;
2401 l2cap_conn_unreliable(conn, ECOMM);
2402 goto drop;
2405 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2406 skb->len);
2407 conn->rx_len -= skb->len;
2409 if (!conn->rx_len) {
2410 /* Complete frame received */
2411 l2cap_recv_frame(conn, conn->rx_skb);
2412 conn->rx_skb = NULL;
2416 drop:
2417 kfree_skb(skb);
2418 return 0;
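/*
 * Worked example of the reassembly above (sizes hypothetical): an
 * L2CAP PDU with hdr->len = 600 arriving over an ACL link with a
 * 339 byte packet size:
 *
 *	ACL_START fragment:    339 bytes, rx_len = 604 - 339 = 265
 *	continuation fragment: 265 bytes, rx_len = 0 -> l2cap_recv_frame()
 *
 * A start fragment whose length already equals hdr->len + L2CAP_HDR_SIZE
 * is handed to l2cap_recv_frame() directly without any copying.
 */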
2421 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2423 struct sock *sk;
2424 struct hlist_node *node;
2425 char *str = buf;
2427 read_lock_bh(&l2cap_sk_list.lock);
2429 sk_for_each(sk, node, &l2cap_sk_list.head) {
2430 struct l2cap_pinfo *pi = l2cap_pi(sk);
2432 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2433 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2434 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2435 pi->imtu, pi->omtu, pi->link_mode);
2438 read_unlock_bh(&l2cap_sk_list.lock);
2440 return (str - buf);
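/*
 * Each line emitted above has the form (example values only):
 *
 *	00:11:22:33:44:55 00:AA:BB:CC:DD:EE 1 4097 0x0040 0x0040 672 672 0x0
 *
 * i.e. source and destination bdaddr, socket state, PSM (host order),
 * SCID, DCID, incoming MTU, outgoing MTU and link mode.
 */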
2443 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2445 static const struct proto_ops l2cap_sock_ops = {
2446 .family = PF_BLUETOOTH,
2447 .owner = THIS_MODULE,
2448 .release = l2cap_sock_release,
2449 .bind = l2cap_sock_bind,
2450 .connect = l2cap_sock_connect,
2451 .listen = l2cap_sock_listen,
2452 .accept = l2cap_sock_accept,
2453 .getname = l2cap_sock_getname,
2454 .sendmsg = l2cap_sock_sendmsg,
2455 .recvmsg = bt_sock_recvmsg,
2456 .poll = bt_sock_poll,
2457 .ioctl = bt_sock_ioctl,
2458 .mmap = sock_no_mmap,
2459 .socketpair = sock_no_socketpair,
2460 .shutdown = l2cap_sock_shutdown,
2461 .setsockopt = l2cap_sock_setsockopt,
2462 .getsockopt = l2cap_sock_getsockopt
2465 static struct net_proto_family l2cap_sock_family_ops = {
2466 .family = PF_BLUETOOTH,
2467 .owner = THIS_MODULE,
2468 .create = l2cap_sock_create,
2471 static struct hci_proto l2cap_hci_proto = {
2472 .name = "L2CAP",
2473 .id = HCI_PROTO_L2CAP,
2474 .connect_ind = l2cap_connect_ind,
2475 .connect_cfm = l2cap_connect_cfm,
2476 .disconn_ind = l2cap_disconn_ind,
2477 .auth_cfm = l2cap_auth_cfm,
2478 .encrypt_cfm = l2cap_encrypt_cfm,
2479 .recv_acldata = l2cap_recv_acldata
2482 static int __init l2cap_init(void)
2484 int err;
2486 err = proto_register(&l2cap_proto, 0);
2487 if (err < 0)
2488 return err;
2490 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2491 if (err < 0) {
2492 BT_ERR("L2CAP socket registration failed");
2493 goto error;
2496 err = hci_register_proto(&l2cap_hci_proto);
2497 if (err < 0) {
2498 BT_ERR("L2CAP protocol registration failed");
2499 bt_sock_unregister(BTPROTO_L2CAP);
2500 goto error;
2503 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2504 BT_ERR("Failed to create L2CAP info file");
2506 BT_INFO("L2CAP ver %s", VERSION);
2507 BT_INFO("L2CAP socket layer initialized");
2509 return 0;
2511 error:
2512 proto_unregister(&l2cap_proto);
2513 return err;
2516 static void __exit l2cap_exit(void)
2518 class_remove_file(bt_class, &class_attr_l2cap);
2520 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2521 BT_ERR("L2CAP socket unregistration failed");
2523 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2524 BT_ERR("L2CAP protocol unregistration failed");
2526 proto_unregister(&l2cap_proto);
2529 void l2cap_load(void)
2531 /* Dummy function to trigger automatic L2CAP module loading by
2532 * other modules that use L2CAP sockets but don't use any other
2533 * symbols from it. */
2534 return;
2536 EXPORT_SYMBOL(l2cap_load);
2538 module_init(l2cap_init);
2539 module_exit(l2cap_exit);
2541 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2542 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2543 MODULE_VERSION(VERSION);
2544 MODULE_LICENSE("GPL");
2545 MODULE_ALIAS("bt-proto-0");