/* net/bluetooth/l2cap.c -- Linux 2.6.12-rc2 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
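/*
 * This file implements both the L2CAP core (connection and channel
 * management, signalling, and fragmentation/reassembly on top of HCI
 * ACL links) and the PF_BLUETOOTH/BTPROTO_L2CAP socket interface.
 *
 * For illustration only, a minimal user-space sketch of how this
 * socket layer is typically used.  It assumes the BlueZ user-space
 * headers <bluetooth/bluetooth.h> and <bluetooth/l2cap.h>; the
 * destination address string and PSM are placeholders:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/l2cap.h>
 *
 *	int l2cap_connect_example(const char *dst, unsigned short psm)
 *	{
 *		struct sockaddr_l2 addr;
 *		int sk;
 *
 *		sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *		if (sk < 0)
 *			return -1;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.l2_family = AF_BLUETOOTH;
 *		addr.l2_psm    = htobs(psm);
 *		str2ba(dst, &addr.l2_bdaddr);
 *
 *		if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
 *			close(sk);
 *			return -1;
 *		}
 *
 *		return sk;
 *	}
 */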
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
45 #include <net/sock.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
55 #ifndef CONFIG_BT_L2CAP_DEBUG
56 #undef BT_DBG
57 #define BT_DBG(D...)
58 #endif
60 #define VERSION "2.7"
62 static struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
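/* sk->sk_timer is armed while a channel is being set up or torn down;
 * if it expires, l2cap_sock_timeout() closes the socket with
 * ETIMEDOUT from timer (BH) context. */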
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
87 bh_lock_sock(sk);
88 __l2cap_sock_close(sk, ETIMEDOUT);
89 bh_unlock_sock(sk);
91 l2cap_sock_kill(sk);
92 sock_put(sk);
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
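/* One struct l2cap_conn is attached to each ACL link (hci_conn) and
 * carries the channels multiplexed over that link; it is created
 * lazily in l2cap_conn_add() and freed in l2cap_conn_del() when the
 * link goes away. */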
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
120 return conn;
122 if (status)
123 return conn;
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
126 return NULL;
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
130 conn->hcon = hcon;
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 rwlock_init(&conn->chan_list.lock);
139 BT_DBG("hcon %p conn %p", hcon, conn);
140 return conn;
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
146 struct sock *sk;
148 if (!(conn = hcon->l2cap_data))
149 return 0;
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
153 if (conn->rx_skb)
154 kfree_skb(conn->rx_skb);
156 /* Kill channels */
157 while ((sk = conn->chan_list.head)) {
158 bh_lock_sock(sk);
159 l2cap_chan_del(sk, err);
160 bh_unlock_sock(sk);
161 l2cap_sock_kill(sk);
164 hcon->l2cap_data = NULL;
165 kfree(conn);
166 return 0;
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
177 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
179 u8 id;
181 /* Get next available identifier.
182 * 1 - 128 are used by kernel.
183 * 129 - 199 are reserved.
184 * 200 - 254 are used by utilities like l2ping, etc.
187 spin_lock(&conn->lock);
189 if (++conn->tx_ident > 128)
190 conn->tx_ident = 1;
192 id = conn->tx_ident;
194 spin_unlock(&conn->lock);
196 return id;
199 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
201 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
203 BT_DBG("code 0x%2.2x", code);
205 if (!skb)
206 return -ENOMEM;
208 return hci_send_acl(conn->hcon, skb, 0);
211 /* ---- Socket interface ---- */
212 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
214 struct sock *sk;
215 struct hlist_node *node;
216 sk_for_each(sk, node, &l2cap_sk_list.head)
217 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
218 goto found;
219 sk = NULL;
220 found:
221 return sk;
224 /* Find socket with psm and source bdaddr.
225 * Returns closest match.
227 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
229 struct sock *sk = NULL, *sk1 = NULL;
230 struct hlist_node *node;
232 sk_for_each(sk, node, &l2cap_sk_list.head) {
233 if (state && sk->sk_state != state)
234 continue;
236 if (l2cap_pi(sk)->psm == psm) {
237 /* Exact match. */
238 if (!bacmp(&bt_sk(sk)->src, src))
239 break;
241 /* Closest match */
242 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
243 sk1 = sk;
246 return node ? sk : sk1;
249 /* Find socket with given address (psm, src).
250 * Returns locked socket */
251 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
253 struct sock *s;
254 read_lock(&l2cap_sk_list.lock);
255 s = __l2cap_get_sock_by_psm(state, psm, src);
256 if (s) bh_lock_sock(s);
257 read_unlock(&l2cap_sk_list.lock);
258 return s;
261 static void l2cap_sock_destruct(struct sock *sk)
263 BT_DBG("sk %p", sk);
265 skb_queue_purge(&sk->sk_receive_queue);
266 skb_queue_purge(&sk->sk_write_queue);
269 static void l2cap_sock_cleanup_listen(struct sock *parent)
271 struct sock *sk;
273 BT_DBG("parent %p", parent);
275 /* Close not yet accepted channels */
276 while ((sk = bt_accept_dequeue(parent, NULL)))
277 l2cap_sock_close(sk);
279 parent->sk_state = BT_CLOSED;
280 sock_set_flag(parent, SOCK_ZAPPED);
283 /* Kill socket (only if zapped and orphaned).
284  * Must be called on an unlocked socket.
286 static void l2cap_sock_kill(struct sock *sk)
288 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
289 return;
291 BT_DBG("sk %p state %d", sk, sk->sk_state);
293 /* Kill poor orphan */
294 bt_sock_unlink(&l2cap_sk_list, sk);
295 sock_set_flag(sk, SOCK_DEAD);
296 sock_put(sk);
299 static void __l2cap_sock_close(struct sock *sk, int reason)
301 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
303 switch (sk->sk_state) {
304 case BT_LISTEN:
305 l2cap_sock_cleanup_listen(sk);
306 break;
308 case BT_CONNECTED:
309 case BT_CONFIG:
310 case BT_CONNECT2:
311 if (sk->sk_type == SOCK_SEQPACKET) {
312 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
313 struct l2cap_disconn_req req;
315 sk->sk_state = BT_DISCONN;
316 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
318 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
319 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
320 l2cap_send_cmd(conn, l2cap_get_ident(conn),
321 L2CAP_DISCONN_REQ, sizeof(req), &req);
322 } else {
323 l2cap_chan_del(sk, reason);
325 break;
327 case BT_CONNECT:
328 case BT_DISCONN:
329 l2cap_chan_del(sk, reason);
330 break;
332 default:
333 sock_set_flag(sk, SOCK_ZAPPED);
334 break;
338 /* Must be called on unlocked socket. */
339 static void l2cap_sock_close(struct sock *sk)
341 l2cap_sock_clear_timer(sk);
342 lock_sock(sk);
343 __l2cap_sock_close(sk, ECONNRESET);
344 release_sock(sk);
345 l2cap_sock_kill(sk);
348 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
350 struct l2cap_pinfo *pi = l2cap_pi(sk);
352 BT_DBG("sk %p", sk);
354 if (parent) {
355 sk->sk_type = parent->sk_type;
356 pi->imtu = l2cap_pi(parent)->imtu;
357 pi->omtu = l2cap_pi(parent)->omtu;
358 pi->link_mode = l2cap_pi(parent)->link_mode;
359 } else {
360 pi->imtu = L2CAP_DEFAULT_MTU;
361 pi->omtu = 0;
362 pi->link_mode = 0;
365 /* Default config options */
366 pi->conf_mtu = L2CAP_DEFAULT_MTU;
367 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
370 static struct proto l2cap_proto = {
371 .name = "L2CAP",
372 .owner = THIS_MODULE,
373 .obj_size = sizeof(struct l2cap_pinfo)
376 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
378 struct sock *sk;
380 sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
381 if (!sk)
382 return NULL;
384 sock_init_data(sock, sk);
385 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
387 sk->sk_destruct = l2cap_sock_destruct;
388 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
390 sock_reset_flag(sk, SOCK_ZAPPED);
392 sk->sk_protocol = proto;
393 sk->sk_state = BT_OPEN;
395 l2cap_sock_init_timer(sk);
397 bt_sock_link(&l2cap_sk_list, sk);
398 return sk;
401 static int l2cap_sock_create(struct socket *sock, int protocol)
403 struct sock *sk;
405 BT_DBG("sock %p", sock);
407 sock->state = SS_UNCONNECTED;
409 if (sock->type != SOCK_SEQPACKET &&
410 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
411 return -ESOCKTNOSUPPORT;
413 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
414 return -EPERM;
416 sock->ops = &l2cap_sock_ops;
418 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
419 if (!sk)
420 return -ENOMEM;
422 l2cap_sock_init(sk, NULL);
423 return 0;
426 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
428 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
429 struct sock *sk = sock->sk;
430 int err = 0;
432 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
434 if (!addr || addr->sa_family != AF_BLUETOOTH)
435 return -EINVAL;
437 lock_sock(sk);
439 if (sk->sk_state != BT_OPEN) {
440 err = -EBADFD;
441 goto done;
444 write_lock_bh(&l2cap_sk_list.lock);
446 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
447 err = -EADDRINUSE;
448 } else {
449 /* Save source address */
450 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
451 l2cap_pi(sk)->psm = la->l2_psm;
452 l2cap_pi(sk)->sport = la->l2_psm;
453 sk->sk_state = BT_BOUND;
456 write_unlock_bh(&l2cap_sk_list.lock);
458 done:
459 release_sock(sk);
460 return err;
463 static int l2cap_do_connect(struct sock *sk)
465 bdaddr_t *src = &bt_sk(sk)->src;
466 bdaddr_t *dst = &bt_sk(sk)->dst;
467 struct l2cap_conn *conn;
468 struct hci_conn *hcon;
469 struct hci_dev *hdev;
470 int err = 0;
472 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
474 if (!(hdev = hci_get_route(dst, src)))
475 return -EHOSTUNREACH;
477 hci_dev_lock_bh(hdev);
479 err = -ENOMEM;
481 hcon = hci_connect(hdev, ACL_LINK, dst);
482 if (!hcon)
483 goto done;
485 conn = l2cap_conn_add(hcon, 0);
486 if (!conn) {
487 hci_conn_put(hcon);
488 goto done;
491 err = 0;
493 /* Update source addr of the socket */
494 bacpy(src, conn->src);
496 l2cap_chan_add(conn, sk, NULL);
498 sk->sk_state = BT_CONNECT;
499 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
501 if (hcon->state == BT_CONNECTED) {
502 if (sk->sk_type == SOCK_SEQPACKET) {
503 struct l2cap_conn_req req;
504 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
505 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
506 req.psm = l2cap_pi(sk)->psm;
507 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
508 L2CAP_CONN_REQ, sizeof(req), &req);
509 } else {
510 l2cap_sock_clear_timer(sk);
511 sk->sk_state = BT_CONNECTED;
515 done:
516 hci_dev_unlock_bh(hdev);
517 hci_dev_put(hdev);
518 return err;
521 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
523 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
524 struct sock *sk = sock->sk;
525 int err = 0;
527 lock_sock(sk);
529 BT_DBG("sk %p", sk);
531 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
532 err = -EINVAL;
533 goto done;
536 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
537 err = -EINVAL;
538 goto done;
541 switch(sk->sk_state) {
542 case BT_CONNECT:
543 case BT_CONNECT2:
544 case BT_CONFIG:
545 /* Already connecting */
546 goto wait;
548 case BT_CONNECTED:
549 /* Already connected */
550 goto done;
552 case BT_OPEN:
553 case BT_BOUND:
554 /* Can connect */
555 break;
557 default:
558 err = -EBADFD;
559 goto done;
562 /* Set destination address and psm */
563 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
564 l2cap_pi(sk)->psm = la->l2_psm;
566 if ((err = l2cap_do_connect(sk)))
567 goto done;
569 wait:
570 err = bt_sock_wait_state(sk, BT_CONNECTED,
571 sock_sndtimeo(sk, flags & O_NONBLOCK));
572 done:
573 release_sock(sk);
574 return err;
577 static int l2cap_sock_listen(struct socket *sock, int backlog)
579 struct sock *sk = sock->sk;
580 int err = 0;
582 BT_DBG("sk %p backlog %d", sk, backlog);
584 lock_sock(sk);
586 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
587 err = -EBADFD;
588 goto done;
591 if (!l2cap_pi(sk)->psm) {
592 bdaddr_t *src = &bt_sk(sk)->src;
593 u16 psm;
595 err = -EINVAL;
597 write_lock_bh(&l2cap_sk_list.lock);
599 for (psm = 0x1001; psm < 0x1100; psm += 2)
600 if (!__l2cap_get_sock_by_addr(psm, src)) {
601 l2cap_pi(sk)->psm = htobs(psm);
602 l2cap_pi(sk)->sport = htobs(psm);
603 err = 0;
604 break;
607 write_unlock_bh(&l2cap_sk_list.lock);
609 if (err < 0)
610 goto done;
613 sk->sk_max_ack_backlog = backlog;
614 sk->sk_ack_backlog = 0;
615 sk->sk_state = BT_LISTEN;
617 done:
618 release_sock(sk);
619 return err;
622 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
624 DECLARE_WAITQUEUE(wait, current);
625 struct sock *sk = sock->sk, *nsk;
626 long timeo;
627 int err = 0;
629 lock_sock(sk);
631 if (sk->sk_state != BT_LISTEN) {
632 err = -EBADFD;
633 goto done;
636 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
638 BT_DBG("sk %p timeo %ld", sk, timeo);
640 /* Wait for an incoming connection. (wake-one). */
641 add_wait_queue_exclusive(sk->sk_sleep, &wait);
642 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
643 set_current_state(TASK_INTERRUPTIBLE);
644 if (!timeo) {
645 err = -EAGAIN;
646 break;
649 release_sock(sk);
650 timeo = schedule_timeout(timeo);
651 lock_sock(sk);
653 if (sk->sk_state != BT_LISTEN) {
654 err = -EBADFD;
655 break;
658 if (signal_pending(current)) {
659 err = sock_intr_errno(timeo);
660 break;
663 set_current_state(TASK_RUNNING);
664 remove_wait_queue(sk->sk_sleep, &wait);
666 if (err)
667 goto done;
669 newsock->state = SS_CONNECTED;
671 BT_DBG("new socket %p", nsk);
673 done:
674 release_sock(sk);
675 return err;
678 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
680 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
681 struct sock *sk = sock->sk;
683 BT_DBG("sock %p, sk %p", sock, sk);
685 addr->sa_family = AF_BLUETOOTH;
686 *len = sizeof(struct sockaddr_l2);
688 if (peer)
689 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
690 else
691 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
693 la->l2_psm = l2cap_pi(sk)->psm;
694 return 0;
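/* Transmit one packet: the first skb carries the L2CAP basic header
 * (plus the 2-byte PSM for connectionless SOCK_DGRAM sockets);
 * whatever does not fit into the ACL MTU is chained as continuation
 * fragments on skb_shinfo(skb)->frag_list and handed to
 * hci_send_acl() as a single frame. */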
697 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
699 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
700 struct sk_buff *skb, **frag;
701 int err, hlen, count, sent=0;
702 struct l2cap_hdr *lh;
704 BT_DBG("sk %p len %d", sk, len);
706 /* First fragment (with L2CAP header) */
707 if (sk->sk_type == SOCK_DGRAM)
708 hlen = L2CAP_HDR_SIZE + 2;
709 else
710 hlen = L2CAP_HDR_SIZE;
712 count = min_t(unsigned int, (conn->mtu - hlen), len);
714 skb = bt_skb_send_alloc(sk, hlen + count,
715 msg->msg_flags & MSG_DONTWAIT, &err);
716 if (!skb)
717 return err;
719 /* Create L2CAP header */
720 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
721 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
722 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
724 if (sk->sk_type == SOCK_DGRAM)
725 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
727 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
728 err = -EFAULT;
729 goto fail;
732 sent += count;
733 len -= count;
735 /* Continuation fragments (no L2CAP header) */
736 frag = &skb_shinfo(skb)->frag_list;
737 while (len) {
738 count = min_t(unsigned int, conn->mtu, len);
740 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
741 if (!*frag)
742 goto fail;
744 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
745 err = -EFAULT;
746 goto fail;
749 sent += count;
750 len -= count;
752 frag = &(*frag)->next;
755 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
756 goto fail;
758 return sent;
760 fail:
761 kfree_skb(skb);
762 return err;
765 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
767 struct sock *sk = sock->sk;
768 int err = 0;
770 BT_DBG("sock %p, sk %p", sock, sk);
772 if (sk->sk_err)
773 return sock_error(sk);
775 if (msg->msg_flags & MSG_OOB)
776 return -EOPNOTSUPP;
778 /* Check outgoing MTU */
779 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
780 return -EINVAL;
782 lock_sock(sk);
784 if (sk->sk_state == BT_CONNECTED)
785 err = l2cap_do_send(sk, msg, len);
786 else
787 err = -ENOTCONN;
789 release_sock(sk);
790 return err;
793 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
795 struct sock *sk = sock->sk;
796 struct l2cap_options opts;
797 int err = 0, len;
798 u32 opt;
800 BT_DBG("sk %p", sk);
802 lock_sock(sk);
804 switch (optname) {
805 case L2CAP_OPTIONS:
806 len = min_t(unsigned int, sizeof(opts), optlen);
807 if (copy_from_user((char *) &opts, optval, len)) {
808 err = -EFAULT;
809 break;
811 l2cap_pi(sk)->imtu = opts.imtu;
812 l2cap_pi(sk)->omtu = opts.omtu;
813 break;
815 case L2CAP_LM:
816 if (get_user(opt, (u32 __user *) optval)) {
817 err = -EFAULT;
818 break;
821 l2cap_pi(sk)->link_mode = opt;
822 break;
824 default:
825 err = -ENOPROTOOPT;
826 break;
829 release_sock(sk);
830 return err;
833 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
835 struct sock *sk = sock->sk;
836 struct l2cap_options opts;
837 struct l2cap_conninfo cinfo;
838 int len, err = 0;
840 BT_DBG("sk %p", sk);
842 if (get_user(len, optlen))
843 return -EFAULT;
845 lock_sock(sk);
847 switch (optname) {
848 case L2CAP_OPTIONS:
849 opts.imtu = l2cap_pi(sk)->imtu;
850 opts.omtu = l2cap_pi(sk)->omtu;
851 opts.flush_to = l2cap_pi(sk)->flush_to;
852 opts.mode = 0x00;
854 len = min_t(unsigned int, len, sizeof(opts));
855 if (copy_to_user(optval, (char *) &opts, len))
856 err = -EFAULT;
858 break;
860 case L2CAP_LM:
861 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
862 err = -EFAULT;
863 break;
865 case L2CAP_CONNINFO:
866 if (sk->sk_state != BT_CONNECTED) {
867 err = -ENOTCONN;
868 break;
871 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
872 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
874 len = min_t(unsigned int, len, sizeof(cinfo));
875 if (copy_to_user(optval, (char *) &cinfo, len))
876 err = -EFAULT;
878 break;
880 default:
881 err = -ENOPROTOOPT;
882 break;
885 release_sock(sk);
886 return err;
889 static int l2cap_sock_shutdown(struct socket *sock, int how)
891 struct sock *sk = sock->sk;
892 int err = 0;
894 BT_DBG("sock %p, sk %p", sock, sk);
896 if (!sk)
897 return 0;
899 lock_sock(sk);
900 if (!sk->sk_shutdown) {
901 sk->sk_shutdown = SHUTDOWN_MASK;
902 l2cap_sock_clear_timer(sk);
903 __l2cap_sock_close(sk, 0);
905 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
906 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
908 release_sock(sk);
909 return err;
912 static int l2cap_sock_release(struct socket *sock)
914 struct sock *sk = sock->sk;
915 int err;
917 BT_DBG("sock %p, sk %p", sock, sk);
919 if (!sk)
920 return 0;
922 err = l2cap_sock_shutdown(sock, 2);
924 sock_orphan(sk);
925 l2cap_sock_kill(sk);
926 return err;
929 /* ---- L2CAP channels ---- */
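/* The channels of a connection are kept on a doubly linked list
 * (conn->chan_list, threaded through l2cap_pinfo->next_c/prev_c and
 * protected by chan_list.lock); the helpers below look channels up by
 * DCID, SCID or signalling ident. */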
930 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
932 struct sock *s;
933 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
934 if (l2cap_pi(s)->dcid == cid)
935 break;
937 return s;
940 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
942 struct sock *s;
943 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
944 if (l2cap_pi(s)->scid == cid)
945 break;
947 return s;
950 /* Find channel with given SCID.
951 * Returns locked socket */
952 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
954 struct sock *s;
955 read_lock(&l->lock);
956 s = __l2cap_get_chan_by_scid(l, cid);
957 if (s) bh_lock_sock(s);
958 read_unlock(&l->lock);
959 return s;
962 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
964 struct sock *s;
965 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
966 if (l2cap_pi(s)->ident == ident)
967 break;
969 return s;
972 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
974 struct sock *s;
975 read_lock(&l->lock);
976 s = __l2cap_get_chan_by_ident(l, ident);
977 if (s) bh_lock_sock(s);
978 read_unlock(&l->lock);
979 return s;
982 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
984 u16 cid = 0x0040;
986 for (; cid < 0xffff; cid++) {
987 if(!__l2cap_get_chan_by_scid(l, cid))
988 return cid;
991 return 0;
994 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
996 sock_hold(sk);
998 if (l->head)
999 l2cap_pi(l->head)->prev_c = sk;
1001 l2cap_pi(sk)->next_c = l->head;
1002 l2cap_pi(sk)->prev_c = NULL;
1003 l->head = sk;
1006 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
1008 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
1010 write_lock(&l->lock);
1011 if (sk == l->head)
1012 l->head = next;
1014 if (next)
1015 l2cap_pi(next)->prev_c = prev;
1016 if (prev)
1017 l2cap_pi(prev)->next_c = next;
1018 write_unlock(&l->lock);
1020 __sock_put(sk);
1023 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
1025 struct l2cap_chan_list *l = &conn->chan_list;
1027 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
1029 l2cap_pi(sk)->conn = conn;
1031 if (sk->sk_type == SOCK_SEQPACKET) {
1032 /* Alloc CID for connection-oriented socket */
1033 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
1034 } else if (sk->sk_type == SOCK_DGRAM) {
1035 /* Connectionless socket */
1036 l2cap_pi(sk)->scid = 0x0002;
1037 l2cap_pi(sk)->dcid = 0x0002;
1038 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1039 } else {
1040 /* Raw socket can send/recv signalling messages only */
1041 l2cap_pi(sk)->scid = 0x0001;
1042 l2cap_pi(sk)->dcid = 0x0001;
1043 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1046 __l2cap_chan_link(l, sk);
1048 if (parent)
1049 bt_accept_enqueue(parent, sk);
1052 /* Delete channel.
1053 * Must be called on the locked socket. */
1054 static void l2cap_chan_del(struct sock *sk, int err)
1056 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1057 struct sock *parent = bt_sk(sk)->parent;
1059 l2cap_sock_clear_timer(sk);
1061 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1063 if (conn) {
1064 /* Unlink from channel list */
1065 l2cap_chan_unlink(&conn->chan_list, sk);
1066 l2cap_pi(sk)->conn = NULL;
1067 hci_conn_put(conn->hcon);
1070 sk->sk_state = BT_CLOSED;
1071 sock_set_flag(sk, SOCK_ZAPPED);
1073 if (err)
1074 sk->sk_err = err;
1076 if (parent) {
1077 bt_accept_unlink(sk);
1078 parent->sk_data_ready(parent, 0);
1079 } else
1080 sk->sk_state_change(sk);
1083 static void l2cap_conn_ready(struct l2cap_conn *conn)
1085 struct l2cap_chan_list *l = &conn->chan_list;
1086 struct sock *sk;
1088 BT_DBG("conn %p", conn);
1090 read_lock(&l->lock);
1092 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1093 bh_lock_sock(sk);
1095 if (sk->sk_type != SOCK_SEQPACKET) {
1096 l2cap_sock_clear_timer(sk);
1097 sk->sk_state = BT_CONNECTED;
1098 sk->sk_state_change(sk);
1099 } else if (sk->sk_state == BT_CONNECT) {
1100 struct l2cap_conn_req req;
1101 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1102 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1103 req.psm = l2cap_pi(sk)->psm;
1104 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1107 bh_unlock_sock(sk);
1110 read_unlock(&l->lock);
1113 /* Notify sockets that we cannot guarantee reliability anymore */
1114 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1116 struct l2cap_chan_list *l = &conn->chan_list;
1117 struct sock *sk;
1119 BT_DBG("conn %p", conn);
1121 read_lock(&l->lock);
1122 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1123 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1124 sk->sk_err = err;
1126 read_unlock(&l->lock);
1129 static void l2cap_chan_ready(struct sock *sk)
1131 struct sock *parent = bt_sk(sk)->parent;
1133 BT_DBG("sk %p, parent %p", sk, parent);
1135 l2cap_pi(sk)->conf_state = 0;
1136 l2cap_sock_clear_timer(sk);
1138 if (!parent) {
1139 /* Outgoing channel.
1140 * Wake up socket sleeping on connect.
1142 sk->sk_state = BT_CONNECTED;
1143 sk->sk_state_change(sk);
1144 } else {
1145 /* Incoming channel.
1146 * Wake up socket sleeping on accept.
1148 parent->sk_data_ready(parent, 0);
1152 /* Copy frame to all raw sockets on that connection */
1153 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1155 struct l2cap_chan_list *l = &conn->chan_list;
1156 struct sk_buff *nskb;
1157 struct sock * sk;
1159 BT_DBG("conn %p", conn);
1161 read_lock(&l->lock);
1162 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1163 if (sk->sk_type != SOCK_RAW)
1164 continue;
1166 /* Don't send frame to the socket it came from */
1167 if (skb->sk == sk)
1168 continue;
1170 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1171 continue;
1173 if (sock_queue_rcv_skb(sk, nskb))
1174 kfree_skb(nskb);
1176 read_unlock(&l->lock);
1179 /* ---- L2CAP signalling commands ---- */
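/* Signalling PDUs travel on CID 0x0001.  On the wire they consist of
 * the L2CAP basic header (u16 len, u16 cid), a command header
 * (u8 code, u8 ident, u16 len) and the command-specific payload, all
 * little endian.  l2cap_build_cmd() builds such a PDU and fragments
 * it to the ACL MTU via frag_list, just like l2cap_do_send() does for
 * data packets. */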
1180 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1181 u8 code, u8 ident, u16 dlen, void *data)
1183 struct sk_buff *skb, **frag;
1184 struct l2cap_cmd_hdr *cmd;
1185 struct l2cap_hdr *lh;
1186 int len, count;
1188 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1190 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1191 count = min_t(unsigned int, conn->mtu, len);
1193 skb = bt_skb_alloc(count, GFP_ATOMIC);
1194 if (!skb)
1195 return NULL;
1197 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1198 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1199 lh->cid = __cpu_to_le16(0x0001);
1201 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1202 cmd->code = code;
1203 cmd->ident = ident;
1204 cmd->len = __cpu_to_le16(dlen);
1206 if (dlen) {
1207 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1208 memcpy(skb_put(skb, count), data, count);
1209 data += count;
1212 len -= skb->len;
1214 /* Continuation fragments (no L2CAP header) */
1215 frag = &skb_shinfo(skb)->frag_list;
1216 while (len) {
1217 count = min_t(unsigned int, conn->mtu, len);
1219 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1220 if (!*frag)
1221 goto fail;
1223 memcpy(skb_put(*frag, count), data, count);
1225 len -= count;
1226 data += count;
1228 frag = &(*frag)->next;
1231 return skb;
1233 fail:
1234 kfree_skb(skb);
1235 return NULL;
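/* Configuration options use a simple TLV encoding: u8 type, u8 len,
 * then len bytes of value (2 and 4 byte values are little endian).
 * Bit 7 of the type marks an option as a hint that may be ignored if
 * it is not understood.  For example, an MTU option (L2CAP_CONF_MTU,
 * type 0x01) carrying the default MTU of 672 is encoded as the four
 * bytes 01 02 a0 02. */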
1238 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1240 struct l2cap_conf_opt *opt = *ptr;
1241 int len;
1243 len = L2CAP_CONF_OPT_SIZE + opt->len;
1244 *ptr += len;
1246 *type = opt->type;
1247 *olen = opt->len;
1249 switch (opt->len) {
1250 case 1:
1251 *val = *((u8 *) opt->val);
1252 break;
1254 case 2:
1255 *val = __le16_to_cpu(*((u16 *)opt->val));
1256 break;
1258 case 4:
1259 *val = __le32_to_cpu(*((u32 *)opt->val));
1260 break;
1262 default:
1263 *val = (unsigned long) opt->val;
1264 break;
1267 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1268 return len;
1271 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1273 int type, hint, olen;
1274 unsigned long val;
1275 void *ptr = data;
1277 BT_DBG("sk %p len %d", sk, len);
1279 while (len >= L2CAP_CONF_OPT_SIZE) {
1280 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1282 hint = type & 0x80;
1283 type &= 0x7f;
1285 switch (type) {
1286 case L2CAP_CONF_MTU:
1287 l2cap_pi(sk)->conf_mtu = val;
1288 break;
1290 case L2CAP_CONF_FLUSH_TO:
1291 l2cap_pi(sk)->flush_to = val;
1292 break;
1294 case L2CAP_CONF_QOS:
1295 break;
1297 default:
1298 if (hint)
1299 break;
1301 /* FIXME: Reject unknown option */
1302 break;
1307 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1309 struct l2cap_conf_opt *opt = *ptr;
1311 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1313 opt->type = type;
1314 opt->len = len;
1316 switch (len) {
1317 case 1:
1318 *((u8 *) opt->val) = val;
1319 break;
1321 case 2:
1322 *((u16 *) opt->val) = __cpu_to_le16(val);
1323 break;
1325 case 4:
1326 *((u32 *) opt->val) = __cpu_to_le32(val);
1327 break;
1329 default:
1330 memcpy(opt->val, (void *) val, len);
1331 break;
1334 *ptr += L2CAP_CONF_OPT_SIZE + len;
1337 static int l2cap_build_conf_req(struct sock *sk, void *data)
1339 struct l2cap_pinfo *pi = l2cap_pi(sk);
1340 struct l2cap_conf_req *req = data;
1341 void *ptr = req->data;
1343 BT_DBG("sk %p", sk);
1345 if (pi->imtu != L2CAP_DEFAULT_MTU)
1346 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1348 /* FIXME: Need actual value of the flush timeout */
1349 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1350 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1352 req->dcid = __cpu_to_le16(pi->dcid);
1353 req->flags = __cpu_to_le16(0);
1355 return ptr - data;
1358 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1360 struct l2cap_pinfo *pi = l2cap_pi(sk);
1361 int result = 0;
1363 /* Configure output options and let the other side know
1364 * which ones we don't like. */
1365 if (pi->conf_mtu < pi->omtu) {
1366 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1367 result = L2CAP_CONF_UNACCEPT;
1368 } else {
1369 pi->omtu = pi->conf_mtu;
1372 BT_DBG("sk %p result %d", sk, result);
1373 return result;
1376 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1378 struct l2cap_conf_rsp *rsp = data;
1379 void *ptr = rsp->data;
1380 u16 flags = 0;
1382 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1384 if (result)
1385 *result = l2cap_conf_output(sk, &ptr);
1386 else
1387 flags = 0x0001;
1389 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1390 rsp->result = __cpu_to_le16(result ? *result : 0);
1391 rsp->flags = __cpu_to_le16(flags);
1393 return ptr - data;
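/* Handle an incoming connection request: find a socket listening on
 * the requested PSM, allocate a child socket, link it into the
 * channel list and reply with success, with "pending" while
 * authentication or encryption is outstanding, or with an error such
 * as L2CAP_CR_BAD_PSM or L2CAP_CR_NO_MEM. */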
1396 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1398 struct l2cap_chan_list *list = &conn->chan_list;
1399 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1400 struct l2cap_conn_rsp rsp;
1401 struct sock *sk, *parent;
1402 int result = 0, status = 0;
1404 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1405 u16 psm = req->psm;
1407 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1409 /* Check if we have a socket listening on this psm */
1410 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1411 if (!parent) {
1412 result = L2CAP_CR_BAD_PSM;
1413 goto sendresp;
1416 result = L2CAP_CR_NO_MEM;
1418 /* Check for backlog size */
1419 if (sk_acceptq_is_full(parent)) {
1420 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1421 goto response;
1424 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1425 if (!sk)
1426 goto response;
1428 write_lock(&list->lock);
1430 /* Check if we already have channel with that dcid */
1431 if (__l2cap_get_chan_by_dcid(list, scid)) {
1432 write_unlock(&list->lock);
1433 sock_set_flag(sk, SOCK_ZAPPED);
1434 l2cap_sock_kill(sk);
1435 goto response;
1438 hci_conn_hold(conn->hcon);
1440 l2cap_sock_init(sk, parent);
1441 bacpy(&bt_sk(sk)->src, conn->src);
1442 bacpy(&bt_sk(sk)->dst, conn->dst);
1443 l2cap_pi(sk)->psm = psm;
1444 l2cap_pi(sk)->dcid = scid;
1446 __l2cap_chan_add(conn, sk, parent);
1447 dcid = l2cap_pi(sk)->scid;
1449 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1451 /* Service level security */
1452 result = L2CAP_CR_PEND;
1453 status = L2CAP_CS_AUTHEN_PEND;
1454 sk->sk_state = BT_CONNECT2;
1455 l2cap_pi(sk)->ident = cmd->ident;
1457 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1458 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1459 if (!hci_conn_encrypt(conn->hcon))
1460 goto done;
1461 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1462 if (!hci_conn_auth(conn->hcon))
1463 goto done;
1466 sk->sk_state = BT_CONFIG;
1467 result = status = 0;
1469 done:
1470 write_unlock(&list->lock);
1472 response:
1473 bh_unlock_sock(parent);
1475 sendresp:
1476 rsp.scid = __cpu_to_le16(scid);
1477 rsp.dcid = __cpu_to_le16(dcid);
1478 rsp.result = __cpu_to_le16(result);
1479 rsp.status = __cpu_to_le16(status);
1480 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1481 return 0;
1484 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1486 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1487 u16 scid, dcid, result, status;
1488 struct sock *sk;
1489 u8 req[128];
1491 scid = __le16_to_cpu(rsp->scid);
1492 dcid = __le16_to_cpu(rsp->dcid);
1493 result = __le16_to_cpu(rsp->result);
1494 status = __le16_to_cpu(rsp->status);
1496 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1498 if (scid) {
1499 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1500 return 0;
1501 } else {
1502 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1503 return 0;
1506 switch (result) {
1507 case L2CAP_CR_SUCCESS:
1508 sk->sk_state = BT_CONFIG;
1509 l2cap_pi(sk)->ident = 0;
1510 l2cap_pi(sk)->dcid = dcid;
1511 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1513 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1514 l2cap_build_conf_req(sk, req), req);
1515 break;
1517 case L2CAP_CR_PEND:
1518 break;
1520 default:
1521 l2cap_chan_del(sk, ECONNREFUSED);
1522 break;
1525 bh_unlock_sock(sk);
1526 return 0;
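/* Configuration is a two-way handshake: each side sends its own
 * config request and answers the peer's.  L2CAP_CONF_OUTPUT_DONE and
 * L2CAP_CONF_INPUT_DONE in conf_state track the two directions; only
 * when both are set does the channel enter BT_CONNECTED. */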
1529 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1531 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1532 u16 dcid, flags;
1533 u8 rsp[64];
1534 struct sock *sk;
1535 int result;
1537 dcid = __le16_to_cpu(req->dcid);
1538 flags = __le16_to_cpu(req->flags);
1540 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1542 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1543 return -ENOENT;
1545 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1547 if (flags & 0x0001) {
1548 /* Incomplete config. Send empty response. */
1549 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1550 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1551 goto unlock;
1554 /* Complete config. */
1555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1556 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1558 if (result)
1559 goto unlock;
1561 /* Output config done */
1562 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1564 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1565 sk->sk_state = BT_CONNECTED;
1566 l2cap_chan_ready(sk);
1567 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1568 u8 req[64];
1569 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1570 l2cap_build_conf_req(sk, req), req);
1573 unlock:
1574 bh_unlock_sock(sk);
1575 return 0;
1578 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1580 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1581 u16 scid, flags, result;
1582 struct sock *sk;
1584 scid = __le16_to_cpu(rsp->scid);
1585 flags = __le16_to_cpu(rsp->flags);
1586 result = __le16_to_cpu(rsp->result);
1588 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1590 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1591 return 0;
1593 switch (result) {
1594 case L2CAP_CONF_SUCCESS:
1595 break;
1597 case L2CAP_CONF_UNACCEPT:
1598 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1599 char req[128];
1600 /* It does not make sense to adjust L2CAP parameters
1601 * that are currently defined in the spec. We simply
1602 * resend the config request that we sent earlier. It is
1603 * stupid, but it helps qualification testing, which
1604 * expects at least some response from us. */
1605 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1606 l2cap_build_conf_req(sk, req), req);
1607 goto done;
1610 default:
1611 sk->sk_state = BT_DISCONN;
1612 sk->sk_err = ECONNRESET;
1613 l2cap_sock_set_timer(sk, HZ * 5);
1615 struct l2cap_disconn_req req;
1616 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1617 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1618 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1619 L2CAP_DISCONN_REQ, sizeof(req), &req);
1621 goto done;
1624 if (flags & 0x01)
1625 goto done;
1627 /* Input config done */
1628 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1630 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1631 sk->sk_state = BT_CONNECTED;
1632 l2cap_chan_ready(sk);
1635 done:
1636 bh_unlock_sock(sk);
1637 return 0;
1640 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1642 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1643 struct l2cap_disconn_rsp rsp;
1644 u16 dcid, scid;
1645 struct sock *sk;
1647 scid = __le16_to_cpu(req->scid);
1648 dcid = __le16_to_cpu(req->dcid);
1650 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1652 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1653 return 0;
1655 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1656 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1657 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1659 sk->sk_shutdown = SHUTDOWN_MASK;
1661 l2cap_chan_del(sk, ECONNRESET);
1662 bh_unlock_sock(sk);
1664 l2cap_sock_kill(sk);
1665 return 0;
1668 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1670 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1671 u16 dcid, scid;
1672 struct sock *sk;
1674 scid = __le16_to_cpu(rsp->scid);
1675 dcid = __le16_to_cpu(rsp->dcid);
1677 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1679 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1680 return 0;
1682 l2cap_chan_del(sk, 0);
1683 bh_unlock_sock(sk);
1685 l2cap_sock_kill(sk);
1686 return 0;
1689 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1691 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1692 struct l2cap_info_rsp rsp;
1693 u16 type;
1695 type = __le16_to_cpu(req->type);
1697 BT_DBG("type 0x%4.4x", type);
1699 rsp.type = __cpu_to_le16(type);
1700 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1701 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1703 return 0;
1706 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1708 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1709 u16 type, result;
1711 type = __le16_to_cpu(rsp->type);
1712 result = __le16_to_cpu(rsp->result);
1714 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1716 return 0;
1719 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1721 u8 *data = skb->data;
1722 int len = skb->len;
1723 struct l2cap_cmd_hdr cmd;
1724 int err = 0;
1726 l2cap_raw_recv(conn, skb);
1728 while (len >= L2CAP_CMD_HDR_SIZE) {
1729 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1730 data += L2CAP_CMD_HDR_SIZE;
1731 len -= L2CAP_CMD_HDR_SIZE;
1733 cmd.len = __le16_to_cpu(cmd.len);
1735 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1737 if (cmd.len > len || !cmd.ident) {
1738 BT_DBG("corrupted command");
1739 break;
1742 switch (cmd.code) {
1743 case L2CAP_COMMAND_REJ:
1744 /* FIXME: We should process this */
1745 break;
1747 case L2CAP_CONN_REQ:
1748 err = l2cap_connect_req(conn, &cmd, data);
1749 break;
1751 case L2CAP_CONN_RSP:
1752 err = l2cap_connect_rsp(conn, &cmd, data);
1753 break;
1755 case L2CAP_CONF_REQ:
1756 err = l2cap_config_req(conn, &cmd, data);
1757 break;
1759 case L2CAP_CONF_RSP:
1760 err = l2cap_config_rsp(conn, &cmd, data);
1761 break;
1763 case L2CAP_DISCONN_REQ:
1764 err = l2cap_disconnect_req(conn, &cmd, data);
1765 break;
1767 case L2CAP_DISCONN_RSP:
1768 err = l2cap_disconnect_rsp(conn, &cmd, data);
1769 break;
1771 case L2CAP_ECHO_REQ:
1772 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1773 break;
1775 case L2CAP_ECHO_RSP:
1776 break;
1778 case L2CAP_INFO_REQ:
1779 err = l2cap_information_req(conn, &cmd, data);
1780 break;
1782 case L2CAP_INFO_RSP:
1783 err = l2cap_information_rsp(conn, &cmd, data);
1784 break;
1786 default:
1787 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1788 err = -EINVAL;
1789 break;
1792 if (err) {
1793 struct l2cap_cmd_rej rej;
1794 BT_DBG("error %d", err);
1796 /* FIXME: Map err to a valid reason */
1797 rej.reason = __cpu_to_le16(0);
1798 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1801 data += cmd.len;
1802 len -= cmd.len;
1805 kfree_skb(skb);
1808 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1810 struct sock *sk;
1812 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1813 if (!sk) {
1814 BT_DBG("unknown cid 0x%4.4x", cid);
1815 goto drop;
1818 BT_DBG("sk %p, len %d", sk, skb->len);
1820 if (sk->sk_state != BT_CONNECTED)
1821 goto drop;
1823 if (l2cap_pi(sk)->imtu < skb->len)
1824 goto drop;
1826 /* If the socket receive buffer overflows we drop data here,
1827 * which is *bad* because L2CAP has to be reliable.
1828 * But we don't have any other choice: L2CAP doesn't
1829 * provide a flow control mechanism. */
1831 if (!sock_queue_rcv_skb(sk, skb))
1832 goto done;
1834 drop:
1835 kfree_skb(skb);
1837 done:
1838 if (sk) bh_unlock_sock(sk);
1839 return 0;
1842 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1844 struct sock *sk;
1846 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1847 if (!sk)
1848 goto drop;
1850 BT_DBG("sk %p, len %d", sk, skb->len);
1852 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1853 goto drop;
1855 if (l2cap_pi(sk)->imtu < skb->len)
1856 goto drop;
1858 if (!sock_queue_rcv_skb(sk, skb))
1859 goto done;
1861 drop:
1862 kfree_skb(skb);
1864 done:
1865 if (sk) bh_unlock_sock(sk);
1866 return 0;
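/* Demultiplex a reassembled L2CAP frame by CID: 0x0001 is the
 * signalling channel, 0x0002 the connectionless (PSM addressed)
 * channel, and everything else is looked up as a connection-oriented
 * data channel. */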
1869 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1871 struct l2cap_hdr *lh = (void *) skb->data;
1872 u16 cid, psm, len;
1874 skb_pull(skb, L2CAP_HDR_SIZE);
1875 cid = __le16_to_cpu(lh->cid);
1876 len = __le16_to_cpu(lh->len);
1878 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1880 switch (cid) {
1881 case 0x0001:
1882 l2cap_sig_channel(conn, skb);
1883 break;
1885 case 0x0002:
1886 psm = get_unaligned((u16 *) skb->data);
1887 skb_pull(skb, 2);
1888 l2cap_conless_channel(conn, psm, skb);
1889 break;
1891 default:
1892 l2cap_data_channel(conn, cid, skb);
1893 break;
1897 /* ---- L2CAP interface with lower layer (HCI) ---- */
1899 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1901 int exact = 0, lm1 = 0, lm2 = 0;
1902 register struct sock *sk;
1903 struct hlist_node *node;
1905 if (type != ACL_LINK)
1906 return 0;
1908 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1910 /* Find listening sockets and check their link_mode */
1911 read_lock(&l2cap_sk_list.lock);
1912 sk_for_each(sk, node, &l2cap_sk_list.head) {
1913 if (sk->sk_state != BT_LISTEN)
1914 continue;
1916 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1917 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1918 exact++;
1919 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1920 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1922 read_unlock(&l2cap_sk_list.lock);
1924 return exact ? lm1 : lm2;
1927 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1929 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1931 if (hcon->type != ACL_LINK)
1932 return 0;
1934 if (!status) {
1935 struct l2cap_conn *conn;
1937 conn = l2cap_conn_add(hcon, status);
1938 if (conn)
1939 l2cap_conn_ready(conn);
1940 } else
1941 l2cap_conn_del(hcon, bt_err(status));
1943 return 0;
1946 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1948 BT_DBG("hcon %p reason %d", hcon, reason);
1950 if (hcon->type != ACL_LINK)
1951 return 0;
1953 l2cap_conn_del(hcon, bt_err(reason));
1954 return 0;
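/* Authentication and encryption results from HCI: channels parked in
 * BT_CONNECT2 waiting for a security procedure either move on to
 * configuration or are refused with L2CAP_CR_SEC_BLOCK, and the
 * deferred connect response is sent now. */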
1957 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1959 struct l2cap_chan_list *l;
1960 struct l2cap_conn *conn;
1961 struct l2cap_conn_rsp rsp;
1962 struct sock *sk;
1963 int result;
1965 if (!(conn = hcon->l2cap_data))
1966 return 0;
1967 l = &conn->chan_list;
1969 BT_DBG("conn %p", conn);
1971 read_lock(&l->lock);
1973 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1974 bh_lock_sock(sk);
1976 if (sk->sk_state != BT_CONNECT2 ||
1977 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1978 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1979 bh_unlock_sock(sk);
1980 continue;
1983 if (!status) {
1984 sk->sk_state = BT_CONFIG;
1985 result = 0;
1986 } else {
1987 sk->sk_state = BT_DISCONN;
1988 l2cap_sock_set_timer(sk, HZ/10);
1989 result = L2CAP_CR_SEC_BLOCK;
1992 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1993 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1994 rsp.result = __cpu_to_le16(result);
1995 rsp.status = __cpu_to_le16(0);
1996 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
1997 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1999 bh_unlock_sock(sk);
2002 read_unlock(&l->lock);
2003 return 0;
2006 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2008 struct l2cap_chan_list *l;
2009 struct l2cap_conn *conn;
2010 struct l2cap_conn_rsp rsp;
2011 struct sock *sk;
2012 int result;
2014 if (!(conn = hcon->l2cap_data))
2015 return 0;
2016 l = &conn->chan_list;
2018 BT_DBG("conn %p", conn);
2020 read_lock(&l->lock);
2022 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2023 bh_lock_sock(sk);
2025 if (sk->sk_state != BT_CONNECT2) {
2026 bh_unlock_sock(sk);
2027 continue;
2030 if (!status) {
2031 sk->sk_state = BT_CONFIG;
2032 result = 0;
2033 } else {
2034 sk->sk_state = BT_DISCONN;
2035 l2cap_sock_set_timer(sk, HZ/10);
2036 result = L2CAP_CR_SEC_BLOCK;
2039 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
2040 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
2041 rsp.result = __cpu_to_le16(result);
2042 rsp.status = __cpu_to_le16(0);
2043 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2044 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2046 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2047 hci_conn_change_link_key(hcon);
2049 bh_unlock_sock(sk);
2052 read_unlock(&l->lock);
2053 return 0;
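/* Reassemble L2CAP frames from HCI ACL fragments: an ACL_START
 * fragment carries the L2CAP basic header, which gives the total
 * frame length; continuation fragments are appended to conn->rx_skb
 * until conn->rx_len drops to zero and the complete frame is passed
 * to l2cap_recv_frame(). */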
2056 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2058 struct l2cap_conn *conn = hcon->l2cap_data;
2060 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2061 goto drop;
2063 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2065 if (flags & ACL_START) {
2066 struct l2cap_hdr *hdr;
2067 int len;
2069 if (conn->rx_len) {
2070 BT_ERR("Unexpected start frame (len %d)", skb->len);
2071 kfree_skb(conn->rx_skb);
2072 conn->rx_skb = NULL;
2073 conn->rx_len = 0;
2074 l2cap_conn_unreliable(conn, ECOMM);
2077 if (skb->len < 2) {
2078 BT_ERR("Frame is too short (len %d)", skb->len);
2079 l2cap_conn_unreliable(conn, ECOMM);
2080 goto drop;
2083 hdr = (struct l2cap_hdr *) skb->data;
2084 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2086 if (len == skb->len) {
2087 /* Complete frame received */
2088 l2cap_recv_frame(conn, skb);
2089 return 0;
2092 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2094 if (skb->len > len) {
2095 BT_ERR("Frame is too long (len %d, expected len %d)",
2096 skb->len, len);
2097 l2cap_conn_unreliable(conn, ECOMM);
2098 goto drop;
2101 /* Allocate skb for the complete frame (with header) */
2102 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2103 goto drop;
2105 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2106 conn->rx_len = len - skb->len;
2107 } else {
2108 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2110 if (!conn->rx_len) {
2111 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2112 l2cap_conn_unreliable(conn, ECOMM);
2113 goto drop;
2116 if (skb->len > conn->rx_len) {
2117 BT_ERR("Fragment is too long (len %d, expected %d)",
2118 skb->len, conn->rx_len);
2119 kfree_skb(conn->rx_skb);
2120 conn->rx_skb = NULL;
2121 conn->rx_len = 0;
2122 l2cap_conn_unreliable(conn, ECOMM);
2123 goto drop;
2126 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2127 conn->rx_len -= skb->len;
2129 if (!conn->rx_len) {
2130 /* Complete frame received */
2131 l2cap_recv_frame(conn, conn->rx_skb);
2132 conn->rx_skb = NULL;
2136 drop:
2137 kfree_skb(skb);
2138 return 0;
2141 /* ---- Proc fs support ---- */
2142 #ifdef CONFIG_PROC_FS
2143 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2145 struct sock *sk;
2146 struct hlist_node *node;
2147 loff_t l = *pos;
2149 read_lock_bh(&l2cap_sk_list.lock);
2151 sk_for_each(sk, node, &l2cap_sk_list.head)
2152 if (!l--)
2153 goto found;
2154 sk = NULL;
2155 found:
2156 return sk;
2159 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
2161 (*pos)++;
2162 return sk_next(e);
2165 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2167 read_unlock_bh(&l2cap_sk_list.lock);
2170 static int l2cap_seq_show(struct seq_file *seq, void *e)
2172 struct sock *sk = e;
2173 struct l2cap_pinfo *pi = l2cap_pi(sk);
2175 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2176 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2177 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2178 pi->omtu, pi->link_mode);
2179 return 0;
2182 static struct seq_operations l2cap_seq_ops = {
2183 .start = l2cap_seq_start,
2184 .next = l2cap_seq_next,
2185 .stop = l2cap_seq_stop,
2186 .show = l2cap_seq_show
2189 static int l2cap_seq_open(struct inode *inode, struct file *file)
2191 return seq_open(file, &l2cap_seq_ops);
2194 static struct file_operations l2cap_seq_fops = {
2195 .owner = THIS_MODULE,
2196 .open = l2cap_seq_open,
2197 .read = seq_read,
2198 .llseek = seq_lseek,
2199 .release = seq_release,
2202 static int __init l2cap_proc_init(void)
2204 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2205 if (!p)
2206 return -ENOMEM;
2207 p->owner = THIS_MODULE;
2208 p->proc_fops = &l2cap_seq_fops;
2209 return 0;
2212 static void __exit l2cap_proc_cleanup(void)
2214 remove_proc_entry("l2cap", proc_bt);
2217 #else /* CONFIG_PROC_FS */
2219 static int __init l2cap_proc_init(void)
2221 return 0;
2224 static void __exit l2cap_proc_cleanup(void)
2226 return;
2228 #endif /* CONFIG_PROC_FS */
2230 static struct proto_ops l2cap_sock_ops = {
2231 .family = PF_BLUETOOTH,
2232 .owner = THIS_MODULE,
2233 .release = l2cap_sock_release,
2234 .bind = l2cap_sock_bind,
2235 .connect = l2cap_sock_connect,
2236 .listen = l2cap_sock_listen,
2237 .accept = l2cap_sock_accept,
2238 .getname = l2cap_sock_getname,
2239 .sendmsg = l2cap_sock_sendmsg,
2240 .recvmsg = bt_sock_recvmsg,
2241 .poll = bt_sock_poll,
2242 .mmap = sock_no_mmap,
2243 .socketpair = sock_no_socketpair,
2244 .ioctl = sock_no_ioctl,
2245 .shutdown = l2cap_sock_shutdown,
2246 .setsockopt = l2cap_sock_setsockopt,
2247 .getsockopt = l2cap_sock_getsockopt
2250 static struct net_proto_family l2cap_sock_family_ops = {
2251 .family = PF_BLUETOOTH,
2252 .owner = THIS_MODULE,
2253 .create = l2cap_sock_create,
2256 static struct hci_proto l2cap_hci_proto = {
2257 .name = "L2CAP",
2258 .id = HCI_PROTO_L2CAP,
2259 .connect_ind = l2cap_connect_ind,
2260 .connect_cfm = l2cap_connect_cfm,
2261 .disconn_ind = l2cap_disconn_ind,
2262 .auth_cfm = l2cap_auth_cfm,
2263 .encrypt_cfm = l2cap_encrypt_cfm,
2264 .recv_acldata = l2cap_recv_acldata
2267 static int __init l2cap_init(void)
2269 int err;
2271 err = proto_register(&l2cap_proto, 0);
2272 if (err < 0)
2273 return err;
2275 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2276 if (err < 0) {
2277 BT_ERR("L2CAP socket registration failed");
2278 goto error;
2281 err = hci_register_proto(&l2cap_hci_proto);
2282 if (err < 0) {
2283 BT_ERR("L2CAP protocol registration failed");
2284 bt_sock_unregister(BTPROTO_L2CAP);
2285 goto error;
2288 l2cap_proc_init();
2290 BT_INFO("L2CAP ver %s", VERSION);
2291 BT_INFO("L2CAP socket layer initialized");
2293 return 0;
2295 error:
2296 proto_unregister(&l2cap_proto);
2297 return err;
2300 static void __exit l2cap_exit(void)
2302 l2cap_proc_cleanup();
2304 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2305 BT_ERR("L2CAP socket unregistration failed");
2307 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2308 BT_ERR("L2CAP protocol unregistration failed");
2310 proto_unregister(&l2cap_proto);
2313 void l2cap_load(void)
2315 /* Dummy function to trigger automatic L2CAP module loading by
2316 * other modules that use L2CAP sockets but don't use any other
2317 * symbols from it. */
2318 return;
2320 EXPORT_SYMBOL(l2cap_load);
2322 module_init(l2cap_init);
2323 module_exit(l2cap_exit);
2325 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2326 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2327 MODULE_VERSION(VERSION);
2328 MODULE_LICENSE("GPL");
2329 MODULE_ALIAS("bt-proto-0");