V4L/DVB (6238): bw-qcam: use data_reverse instead of manually poking the control...
[linux-2.6/mini2440.git] / net / bluetooth / l2cap.c
blobc4e4ce4ebb2b1115c6c92b07079fa6be7647c589
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/device.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.8"
/* Socket-layer operations table, defined later in this file. */
static const struct proto_ops l2cap_sock_ops;

/* Global list of every L2CAP socket, guarded by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED
};

/* Forward declarations for routines referenced before their definition. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* Timer expiry handler for sk->sk_timer (runs in timer/BH context).
 * 'arg' is the socket pointer stashed by l2cap_sock_init_timer().
 * Closes the channel with ETIMEDOUT, then drops the timer's reference. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);
	__l2cap_sock_close(sk, ETIMEDOUT);
	bh_unlock_sock(sk);

	/* l2cap_sock_kill() requires an unlocked socket; it only frees
	 * the socket when it is both zapped and orphaned. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
88 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
90 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
91 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);

	sk_stop_timer(sk, &sk->sk_timer);
}
100 static void l2cap_sock_init_timer(struct sock *sk)
102 init_timer(&sk->sk_timer);
103 sk->sk_timer.function = l2cap_sock_timeout;
104 sk->sk_timer.data = (unsigned long)sk;
107 /* ---- L2CAP channels ---- */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
110 struct sock *s;
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
113 break;
115 return s;
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120 struct sock *s;
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
123 break;
125 return s;
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 struct sock *s;
133 read_lock(&l->lock);
134 s = __l2cap_get_chan_by_scid(l, cid);
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
137 return s;
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
142 struct sock *s;
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
145 break;
147 return s;
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 struct sock *s;
153 read_lock(&l->lock);
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
157 return s;
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
162 u16 cid = 0x0040;
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
166 return cid;
169 return 0;
/* Insert a socket at the head of a connection's channel list and take a
 * reference on it. Caller must hold l->lock for writing. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
/* Remove a socket from a connection's channel list and drop the reference
 * taken by __l2cap_chan_link(). Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: drop reference without potentially freeing here */
	__sock_put(sk);
}
/* Attach a socket to a connection: assign CIDs according to socket type,
 * link it into the channel list, and queue it on 'parent' (if any) for
 * accept(). Caller must hold conn->chan_list.lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket: fixed CID 0x0002 both ways */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only (CID 0x0001) */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
230 /* Delete channel.
231 * Must be called on the locked socket. */
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches the socket from its connection (dropping the hci_conn
 * reference), marks it closed/zapped, records 'err' on the socket, and
 * wakes either the accepting parent or the socket's own waiters. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not-yet-accepted child: remove from parent's accept queue */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
261 /* ---- L2CAP connections ---- */
/* Create (or return the existing) L2CAP connection object for an ACL link.
 * Returns the existing conn if one is attached, or NULL when 'status' is
 * non-zero (failed HCI connection) or allocation fails. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	/* GFP_ATOMIC: may be called from HCI event (BH) context */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	return conn;
}
/* Tear down an L2CAP connection: free any partially reassembled frame,
 * delete every channel on it (propagating 'err' to each socket), then
 * detach and free the conn object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	if (conn->rx_skb)
		kfree_skb(conn->rx_skb);

	/* Kill channels: l2cap_chan_del() unlinks each head in turn */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
313 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
315 struct l2cap_chan_list *l = &conn->chan_list;
316 write_lock_bh(&l->lock);
317 __l2cap_chan_add(conn, sk, parent);
318 write_unlock_bh(&l->lock);
/* Allocate the next signalling command identifier for this connection. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	/* Wrap within the kernel range 1..128 */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
343 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
347 BT_DBG("code 0x%2.2x", code);
349 if (!skb)
350 return -ENOMEM;
352 return hci_send_acl(conn->hcon, skb, 0);
355 /* ---- Socket interface ---- */
356 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
358 struct sock *sk;
359 struct hlist_node *node;
360 sk_for_each(sk, node, &l2cap_sk_list.head)
361 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
362 goto found;
363 sk = NULL;
364 found:
365 return sk;
368 /* Find socket with psm and source bdaddr.
369 * Returns closest match.
/* Find socket with psm and source bdaddr (optionally filtered by state).
 * Returns closest match: an exact source-address match wins; otherwise a
 * BDADDR_ANY-bound socket with the right psm. Caller holds the list lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only if the loop broke early (exact match) */
	return node ? sk : sk1;
}
393 /* Find socket with given address (psm, src).
394 * Returns locked socket */
395 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
397 struct sock *s;
398 read_lock(&l2cap_sk_list.lock);
399 s = __l2cap_get_sock_by_psm(state, psm, src);
400 if (s) bh_lock_sock(s);
401 read_unlock(&l2cap_sk_list.lock);
402 return s;
405 static void l2cap_sock_destruct(struct sock *sk)
407 BT_DBG("sk %p", sk);
409 skb_queue_purge(&sk->sk_receive_queue);
410 skb_queue_purge(&sk->sk_write_queue);
413 static void l2cap_sock_cleanup_listen(struct sock *parent)
415 struct sock *sk;
417 BT_DBG("parent %p", parent);
419 /* Close not yet accepted channels */
420 while ((sk = bt_accept_dequeue(parent, NULL)))
421 l2cap_sock_close(sk);
423 parent->sk_state = BT_CLOSED;
424 sock_set_flag(parent, SOCK_ZAPPED);
427 /* Kill socket (only if zapped and orphan)
428 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Unlinks it from the global list and drops the list's reference, which
 * typically frees the socket. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Core close state machine. Caller holds the socket lock.
 * Connected SEQPACKET channels get a graceful L2CAP disconnect request
 * (completion arrives via the signalling path); everything else is torn
 * down immediately with 'reason' as the error. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			/* Wait for the disconnect response, bounded by a timer */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else {
			l2cap_chan_del(sk, reason);
		}
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
482 /* Must be called on unlocked socket. */
/* Must be called on unlocked socket.
 * Locking wrapper: stop the timer, run the close state machine with
 * ECONNRESET, then free the socket if it is zapped and orphaned. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
/* Initialize per-socket L2CAP state. A child created by an incoming
 * connection inherits type, MTUs and link mode from 'parent'; a fresh
 * socket gets the defaults. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->link_mode = l2cap_pi(parent)->link_mode;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->link_mode = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->conf_mtu = L2CAP_DEFAULT_MTU;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for
 * struct l2cap_pinfo alongside the generic socket. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
/* Allocate and initialize an L2CAP socket, link it into the global list.
 * 'sock' may be NULL (e.g. kernel-created child for an incoming channel).
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* Default connect timeout; used by l2cap_sock_set_timer() callers */
	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	l2cap_sock_init_timer(sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
/* socket(2) handler for BTPROTO_L2CAP: validate the socket type,
 * enforce CAP_NET_RAW for raw sockets, then allocate and initialize. */
static int l2cap_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
/* bind(2) handler: record source bdaddr and PSM. PSMs below 0x1001 are
 * privileged; binding an already-bound (psm, bdaddr) pair fails. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	/* PSMs 0x0001-0x1000 are reserved/well-known: require privilege */
	if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
		l2cap_pi(sk)->psm   = la->l2_psm;
		l2cap_pi(sk)->sport = la->l2_psm;
		sk->sk_state = BT_BOUND;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
/* Establish the outgoing path for a connecting socket: route to an HCI
 * device, create/reuse the ACL link and L2CAP connection, attach the
 * channel, and — if the link is already up — send the connect request
 * (SEQPACKET) or go straight to BT_CONNECTED (other types). */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	hcon = hci_connect(hdev, ACL_LINK, dst);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else {
			/* No L2CAP handshake needed for raw/dgram sockets */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		}
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
672 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
674 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
675 struct sock *sk = sock->sk;
676 int err = 0;
678 lock_sock(sk);
680 BT_DBG("sk %p", sk);
682 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
683 err = -EINVAL;
684 goto done;
687 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
688 err = -EINVAL;
689 goto done;
692 switch(sk->sk_state) {
693 case BT_CONNECT:
694 case BT_CONNECT2:
695 case BT_CONFIG:
696 /* Already connecting */
697 goto wait;
699 case BT_CONNECTED:
700 /* Already connected */
701 goto done;
703 case BT_OPEN:
704 case BT_BOUND:
705 /* Can connect */
706 break;
708 default:
709 err = -EBADFD;
710 goto done;
713 /* Set destination address and psm */
714 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
715 l2cap_pi(sk)->psm = la->l2_psm;
717 if ((err = l2cap_do_connect(sk)))
718 goto done;
720 wait:
721 err = bt_sock_wait_state(sk, BT_CONNECTED,
722 sock_sndtimeo(sk, flags & O_NONBLOCK));
723 done:
724 release_sock(sk);
725 return err;
/* listen(2) handler (SEQPACKET only). A socket bound without a PSM gets
 * one auto-assigned from the dynamic odd-valued range 0x1001-0x10ff. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* PSMs are odd; step by 2 through the dynamic range */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
				l2cap_pi(sk)->psm   = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
/* accept(2) handler: sleep (wake-one, interruptible) until a child is
 * available on the accept queue, the timeout expires, a signal arrives,
 * or the socket stops listening. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the BH path can enqueue */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
829 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
831 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
832 struct sock *sk = sock->sk;
834 BT_DBG("sock %p, sk %p", sock, sk);
836 addr->sa_family = AF_BLUETOOTH;
837 *len = sizeof(struct sockaddr_l2);
839 if (peer)
840 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
841 else
842 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
844 la->l2_psm = l2cap_pi(sk)->psm;
845 return 0;
/* Build an outgoing L2CAP frame from the user's iovec and queue it on the
 * ACL link. The first skb carries the L2CAP header (plus the 2-byte PSM
 * for connectionless sockets); data beyond the link MTU goes into a chain
 * of headerless fragments on frag_list.
 * Returns bytes consumed on success, negative errno on failure. */
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent=0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;	/* + PSM field */
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	/* kfree_skb(skb) on failure also frees the chained fragments */
	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}
/* sendmsg(2) handler: check for pending socket errors, reject MSG_OOB,
 * enforce the outgoing MTU (except for raw sockets), then hand off to
 * l2cap_do_send() if connected. */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}
/* setsockopt(2) handler for SOL_L2CAP: L2CAP_OPTIONS (imtu/omtu) and
 * L2CAP_LM (link mode bits). Current values pre-fill 'opts' so a short
 * copy from userspace leaves the remaining fields unchanged. */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int err = 0, len;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = 0x00;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Only the MTUs are configurable; flush_to/mode are ignored */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->link_mode = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* getsockopt(2) handler for SOL_L2CAP: L2CAP_OPTIONS, L2CAP_LM, and
 * L2CAP_CONNINFO (HCI handle + device class; connected sockets only). */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = 0x00;

		/* Copy at most what the caller asked for */
		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* shutdown(2) handler: idempotently close the channel; with SO_LINGER
 * set, wait (up to sk_lingertime) for the close to complete. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
/* release (close(2)) handler: shut the channel down, detach the struct
 * socket, and free the sock once it is zapped and orphaned. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
/* Called when the underlying ACL link comes up: raw/dgram channels go
 * straight to BT_CONNECTED; SEQPACKET channels that were waiting in
 * BT_CONNECT now send their deferred L2CAP connect request. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
1117 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that we cannot guaranty reliability anymore:
 * flag 'err' on every channel that asked for L2CAP_LM_RELIABLE. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
			sk->sk_err = err;
	}
	read_unlock(&l->lock);
}
/* Channel configuration finished: reset config state, stop the timer,
 * and wake whoever is waiting (connect()er or accept()ing parent). */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
1156 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection (except the sender).
 * Clones are best-effort: allocation or queueing failures just skip or
 * drop for that socket. */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock * sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
1183 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU on CID 0x0001: L2CAP header + command header +
 * 'dlen' bytes of payload, fragmented at the link MTU into headerless
 * continuation skbs on frag_list. Returns NULL on allocation failure
 * (partially built chain is freed). */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);		/* signalling channel */

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * Outputs type and length; 'val' receives the decoded 1/2/4-byte value
 * (little-endian on the wire) or, for other lengths, a pointer to the
 * raw option payload. Returns the total bytes consumed. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *)opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *)opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are stored little-endian; for other lengths 'val'
 * is treated as a pointer to raw payload of 'len' bytes. */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Build a configure request for this channel into 'data'. Only includes
 * an MTU option when our incoming MTU differs from the default.
 * Returns the total length of the request. */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's buffered configure request (pi->conf_req/conf_len) and
 * build our response into 'data'. Unknown non-hint options are echoed
 * back with L2CAP_CONF_UNKNOWN; a requested MTU smaller than our current
 * omtu is rejected with L2CAP_CONF_UNACCEPT. Returns response length. */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* High bit marks the option as a hint (may be ignored) */
		hint  = type & 0x80;
		type &= 0x7f;

		switch (type) {
		case L2CAP_CONF_MTU:
			pi->conf_mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (pi->conf_mtu < pi->omtu)
			result = L2CAP_CONF_UNACCEPT;
		else
			pi->omtu = pi->conf_mtu;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
	}

	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
1386 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1388 struct l2cap_conf_rsp *rsp = data;
1389 void *ptr = rsp->data;
1391 BT_DBG("sk %p", sk);
1393 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1394 rsp->result = cpu_to_le16(result);
1395 rsp->flags = cpu_to_le16(flags);
1397 return ptr - data;
1400 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
/* Handle an incoming L2CAP Connection Request: look up a listening
 * socket for the requested PSM, allocate and attach a child channel,
 * start any security procedure the listener's link mode requires, and
 * always answer with a Connection Response. */
1402 struct l2cap_chan_list *list = &conn->chan_list;
1403 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1404 struct l2cap_conn_rsp rsp;
1405 struct sock *sk, *parent;
1406 int result = 0, status = 0;
1408 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1409 __le16 psm = req->psm;
1411 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1413 /* Check if we have socket listening on psm */
1414 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1415 if (!parent) {
1416 result = L2CAP_CR_BAD_PSM;
1417 goto sendresp;
/* NOTE(review): 'parent' is locked from here on; the lock is released
 * at the 'response' label via bh_unlock_sock(parent). */
1420 result = L2CAP_CR_NO_MEM;
1422 /* Check for backlog size */
1423 if (sk_acceptq_is_full(parent)) {
1424 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1425 goto response;
1428 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1429 if (!sk)
1430 goto response;
1432 write_lock_bh(&list->lock);
1434 /* Check if we already have channel with that dcid */
1435 if (__l2cap_get_chan_by_dcid(list, scid)) {
1436 write_unlock_bh(&list->lock);
1437 sock_set_flag(sk, SOCK_ZAPPED);
1438 l2cap_sock_kill(sk);
1439 goto response;
1442 hci_conn_hold(conn->hcon);
1444 l2cap_sock_init(sk, parent);
1445 bacpy(&bt_sk(sk)->src, conn->src);
1446 bacpy(&bt_sk(sk)->dst, conn->dst);
1447 l2cap_pi(sk)->psm = psm;
/* The remote's source CID is our destination CID for this channel. */
1448 l2cap_pi(sk)->dcid = scid;
1450 __l2cap_chan_add(conn, sk, parent);
1451 dcid = l2cap_pi(sk)->scid;
1453 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1455 /* Service level security */
1456 result = L2CAP_CR_PEND;
1457 status = L2CAP_CS_AUTHEN_PEND;
1458 sk->sk_state = BT_CONNECT2;
1459 l2cap_pi(sk)->ident = cmd->ident;
/* If security is pending we answer "pending" now; the final response
 * is sent later from the auth/encrypt confirmation callbacks. */
1461 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1462 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1463 if (!hci_conn_encrypt(conn->hcon))
1464 goto done;
1465 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1466 if (!hci_conn_auth(conn->hcon))
1467 goto done;
/* No security needed: the connection succeeds immediately. */
1470 sk->sk_state = BT_CONFIG;
1471 result = status = 0;
1473 done:
1474 write_unlock_bh(&list->lock);
1476 response:
1477 bh_unlock_sock(parent);
1479 sendresp:
1480 rsp.scid = cpu_to_le16(scid);
1481 rsp.dcid = cpu_to_le16(dcid);
1482 rsp.result = cpu_to_le16(result);
1483 rsp.status = cpu_to_le16(status);
1484 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1485 return 0;
1488 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
/* Handle a Connection Response to one of our Connection Requests: on
 * success move to BT_CONFIG and immediately send our Configure Request;
 * on a hard failure, delete the channel. */
1490 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1491 u16 scid, dcid, result, status;
1492 struct sock *sk;
1493 u8 req[128];
1495 scid = __le16_to_cpu(rsp->scid);
1496 dcid = __le16_to_cpu(rsp->dcid);
1497 result = __le16_to_cpu(rsp->result);
1498 status = __le16_to_cpu(rsp->status);
1500 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid == 0 means the peer has not allocated a channel yet; fall back
 * to matching the response by signalling command ident. */
1502 if (scid) {
1503 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1504 return 0;
1505 } else {
1506 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1507 return 0;
1510 switch (result) {
1511 case L2CAP_CR_SUCCESS:
1512 sk->sk_state = BT_CONFIG;
1513 l2cap_pi(sk)->ident = 0;
1514 l2cap_pi(sk)->dcid = dcid;
1515 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1517 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1518 l2cap_build_conf_req(sk, req), req);
1519 break;
/* Pending (e.g. security in progress): wait for the final response. */
1521 case L2CAP_CR_PEND:
1522 break;
1524 default:
1525 l2cap_chan_del(sk, ECONNREFUSED);
1526 break;
1529 bh_unlock_sock(sk);
1530 return 0;
1533 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
/* Handle a Configure Request: accumulate option data across possibly
 * fragmented requests, then parse the whole set and send a Configure
 * Response. When both directions are configured, mark the channel
 * connected. */
1535 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1536 u16 dcid, flags;
1537 u8 rsp[64];
1538 struct sock *sk;
1539 int len;
1541 dcid = __le16_to_cpu(req->dcid);
1542 flags = __le16_to_cpu(req->flags);
1544 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1546 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1547 return -ENOENT;
1549 if (sk->sk_state == BT_DISCONN)
1550 goto unlock;
1552 /* Reject if config buffer is too small. */
1553 len = cmd_len - sizeof(*req);
1554 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1556 l2cap_build_conf_rsp(sk, rsp,
1557 L2CAP_CONF_REJECT, flags), rsp);
1558 goto unlock;
1561 /* Store config. */
1562 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1563 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more option fragments will follow. */
1565 if (flags & 0x0001) {
1566 /* Incomplete config. Send empty response. */
1567 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1568 l2cap_build_conf_rsp(sk, rsp,
1569 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1570 goto unlock;
1573 /* Complete config. */
1574 len = l2cap_parse_conf_req(sk, rsp);
1575 if (len < 0)
1576 goto unlock;
1578 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1580 /* Output config done. */
1581 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1583 /* Reset config buffer. */
1584 l2cap_pi(sk)->conf_len = 0;
1586 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1587 sk->sk_state = BT_CONNECTED;
1588 l2cap_chan_ready(sk);
/* We have not sent our own Configure Request yet; do it now. */
1589 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1590 u8 req[64];
1591 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1592 l2cap_build_conf_req(sk, req), req);
1595 unlock:
1596 bh_unlock_sock(sk);
1597 return 0;
1600 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
/* Handle a Configure Response for our own Configure Request: on
 * "unaccept" retry a limited number of times, on any other failure
 * start disconnecting, and on success mark input configuration done. */
1602 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1603 u16 scid, flags, result;
1604 struct sock *sk;
1606 scid = __le16_to_cpu(rsp->scid);
1607 flags = __le16_to_cpu(rsp->flags);
1608 result = __le16_to_cpu(rsp->result);
1610 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1612 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1613 return 0;
1615 switch (result) {
1616 case L2CAP_CONF_SUCCESS:
1617 break;
1619 case L2CAP_CONF_UNACCEPT:
1620 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1621 char req[128];
1622 /* It does not make sense to adjust L2CAP parameters
1623 * that are currently defined in the spec. We simply
1624 * resend config request that we sent earlier. It is
1625 * stupid, but it helps qualification testing which
1626 * expects at least some response from us. */
1627 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1628 l2cap_build_conf_req(sk, req), req);
1629 goto done;
/* Retries exhausted or hard failure: fall through to disconnect. */
1632 default:
1633 sk->sk_state = BT_DISCONN;
1634 sk->sk_err = ECONNRESET;
1635 l2cap_sock_set_timer(sk, HZ * 5);
1637 struct l2cap_disconn_req req;
1638 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1639 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1640 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1641 L2CAP_DISCONN_REQ, sizeof(req), &req);
1643 goto done;
/* Continuation flag set: wait for the rest of the response. */
1646 if (flags & 0x01)
1647 goto done;
1649 /* Input config done */
1650 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1652 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1653 sk->sk_state = BT_CONNECTED;
1654 l2cap_chan_ready(sk);
1657 done:
1658 bh_unlock_sock(sk);
1659 return 0;
1662 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1664 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1665 struct l2cap_disconn_rsp rsp;
1666 u16 dcid, scid;
1667 struct sock *sk;
1669 scid = __le16_to_cpu(req->scid);
1670 dcid = __le16_to_cpu(req->dcid);
1672 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1674 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1675 return 0;
1677 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1678 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1679 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1681 sk->sk_shutdown = SHUTDOWN_MASK;
1683 l2cap_chan_del(sk, ECONNRESET);
1684 bh_unlock_sock(sk);
1686 l2cap_sock_kill(sk);
1687 return 0;
1690 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1692 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1693 u16 dcid, scid;
1694 struct sock *sk;
1696 scid = __le16_to_cpu(rsp->scid);
1697 dcid = __le16_to_cpu(rsp->dcid);
1699 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1701 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1702 return 0;
1704 l2cap_chan_del(sk, 0);
1705 bh_unlock_sock(sk);
1707 l2cap_sock_kill(sk);
1708 return 0;
1711 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1713 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1714 struct l2cap_info_rsp rsp;
1715 u16 type;
1717 type = __le16_to_cpu(req->type);
1719 BT_DBG("type 0x%4.4x", type);
1721 rsp.type = cpu_to_le16(type);
1722 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1723 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1725 return 0;
1728 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1730 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1731 u16 type, result;
1733 type = __le16_to_cpu(rsp->type);
1734 result = __le16_to_cpu(rsp->result);
1736 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1738 return 0;
1741 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
/* Demultiplex the L2CAP signalling channel: iterate over the commands
 * packed into one skb, dispatch each to its handler, and reject any
 * command whose handler reports an error. Consumes the skb. */
1743 u8 *data = skb->data;
1744 int len = skb->len;
1745 struct l2cap_cmd_hdr cmd;
1746 int err = 0;
/* Give raw (SOCK_RAW) sockets a copy of the signalling traffic. */
1748 l2cap_raw_recv(conn, skb);
1750 while (len >= L2CAP_CMD_HDR_SIZE) {
1751 u16 cmd_len;
1752 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1753 data += L2CAP_CMD_HDR_SIZE;
1754 len -= L2CAP_CMD_HDR_SIZE;
1756 cmd_len = le16_to_cpu(cmd.len);
1758 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command longer than the remaining data, or with ident 0, is
 * malformed; stop processing this skb. */
1760 if (cmd_len > len || !cmd.ident) {
1761 BT_DBG("corrupted command");
1762 break;
1765 switch (cmd.code) {
1766 case L2CAP_COMMAND_REJ:
1767 /* FIXME: We should process this */
1768 break;
1770 case L2CAP_CONN_REQ:
1771 err = l2cap_connect_req(conn, &cmd, data);
1772 break;
1774 case L2CAP_CONN_RSP:
1775 err = l2cap_connect_rsp(conn, &cmd, data);
1776 break;
1778 case L2CAP_CONF_REQ:
1779 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1780 break;
1782 case L2CAP_CONF_RSP:
1783 err = l2cap_config_rsp(conn, &cmd, data);
1784 break;
1786 case L2CAP_DISCONN_REQ:
1787 err = l2cap_disconnect_req(conn, &cmd, data);
1788 break;
1790 case L2CAP_DISCONN_RSP:
1791 err = l2cap_disconnect_rsp(conn, &cmd, data);
1792 break;
/* Echo requests are answered by bouncing the payload back. */
1794 case L2CAP_ECHO_REQ:
1795 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1796 break;
1798 case L2CAP_ECHO_RSP:
1799 break;
1801 case L2CAP_INFO_REQ:
1802 err = l2cap_information_req(conn, &cmd, data);
1803 break;
1805 case L2CAP_INFO_RSP:
1806 err = l2cap_information_rsp(conn, &cmd, data);
1807 break;
1809 default:
1810 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1811 err = -EINVAL;
1812 break;
/* Handler failed: send a Command Reject back to the peer. */
1815 if (err) {
1816 struct l2cap_cmd_rej rej;
1817 BT_DBG("error %d", err);
1819 /* FIXME: Map err to a valid reason */
1820 rej.reason = cpu_to_le16(0);
1821 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Advance past this command's payload to the next command. */
1824 data += cmd_len;
1825 len -= cmd_len;
1828 kfree_skb(skb);
1831 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1833 struct sock *sk;
1835 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1836 if (!sk) {
1837 BT_DBG("unknown cid 0x%4.4x", cid);
1838 goto drop;
1841 BT_DBG("sk %p, len %d", sk, skb->len);
1843 if (sk->sk_state != BT_CONNECTED)
1844 goto drop;
1846 if (l2cap_pi(sk)->imtu < skb->len)
1847 goto drop;
1849 /* If socket recv buffers overflows we drop data here
1850 * which is *bad* because L2CAP has to be reliable.
1851 * But we don't have any other choice. L2CAP doesn't
1852 * provide flow control mechanism. */
1854 if (!sock_queue_rcv_skb(sk, skb))
1855 goto done;
1857 drop:
1858 kfree_skb(skb);
1860 done:
1861 if (sk)
1862 bh_unlock_sock(sk);
1864 return 0;
1867 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1869 struct sock *sk;
1871 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1872 if (!sk)
1873 goto drop;
1875 BT_DBG("sk %p, len %d", sk, skb->len);
1877 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1878 goto drop;
1880 if (l2cap_pi(sk)->imtu < skb->len)
1881 goto drop;
1883 if (!sock_queue_rcv_skb(sk, skb))
1884 goto done;
1886 drop:
1887 kfree_skb(skb);
1889 done:
1890 if (sk) bh_unlock_sock(sk);
1891 return 0;
1894 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1896 struct l2cap_hdr *lh = (void *) skb->data;
1897 u16 cid, len;
1898 __le16 psm;
1900 skb_pull(skb, L2CAP_HDR_SIZE);
1901 cid = __le16_to_cpu(lh->cid);
1902 len = __le16_to_cpu(lh->len);
1904 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1906 switch (cid) {
1907 case 0x0001:
1908 l2cap_sig_channel(conn, skb);
1909 break;
1911 case 0x0002:
1912 psm = get_unaligned((__le16 *) skb->data);
1913 skb_pull(skb, 2);
1914 l2cap_conless_channel(conn, psm, skb);
1915 break;
1917 default:
1918 l2cap_data_channel(conn, cid, skb);
1919 break;
1923 /* ---- L2CAP interface with lower layer (HCI) ---- */
1925 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1927 int exact = 0, lm1 = 0, lm2 = 0;
1928 register struct sock *sk;
1929 struct hlist_node *node;
1931 if (type != ACL_LINK)
1932 return 0;
1934 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1936 /* Find listening sockets and check their link_mode */
1937 read_lock(&l2cap_sk_list.lock);
1938 sk_for_each(sk, node, &l2cap_sk_list.head) {
1939 if (sk->sk_state != BT_LISTEN)
1940 continue;
1942 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1943 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1944 exact++;
1945 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1946 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1948 read_unlock(&l2cap_sk_list.lock);
1950 return exact ? lm1 : lm2;
1953 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1955 struct l2cap_conn *conn;
1957 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1959 if (hcon->type != ACL_LINK)
1960 return 0;
1962 if (!status) {
1963 conn = l2cap_conn_add(hcon, status);
1964 if (conn)
1965 l2cap_conn_ready(conn);
1966 } else
1967 l2cap_conn_del(hcon, bt_err(status));
1969 return 0;
1972 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1974 BT_DBG("hcon %p reason %d", hcon, reason);
1976 if (hcon->type != ACL_LINK)
1977 return 0;
1979 l2cap_conn_del(hcon, bt_err(reason));
1981 return 0;
1984 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1986 struct l2cap_chan_list *l;
1987 struct l2cap_conn *conn = conn = hcon->l2cap_data;
1988 struct l2cap_conn_rsp rsp;
1989 struct sock *sk;
1990 int result;
1992 if (!conn)
1993 return 0;
1995 l = &conn->chan_list;
1997 BT_DBG("conn %p", conn);
1999 read_lock(&l->lock);
2001 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2002 bh_lock_sock(sk);
2004 if (sk->sk_state != BT_CONNECT2 ||
2005 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2006 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2007 bh_unlock_sock(sk);
2008 continue;
2011 if (!status) {
2012 sk->sk_state = BT_CONFIG;
2013 result = 0;
2014 } else {
2015 sk->sk_state = BT_DISCONN;
2016 l2cap_sock_set_timer(sk, HZ/10);
2017 result = L2CAP_CR_SEC_BLOCK;
2020 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2021 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2022 rsp.result = cpu_to_le16(result);
2023 rsp.status = cpu_to_le16(0);
2024 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2025 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2027 bh_unlock_sock(sk);
2030 read_unlock(&l->lock);
2031 return 0;
2034 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2036 struct l2cap_chan_list *l;
2037 struct l2cap_conn *conn = hcon->l2cap_data;
2038 struct l2cap_conn_rsp rsp;
2039 struct sock *sk;
2040 int result;
2042 if (!conn)
2043 return 0;
2045 l = &conn->chan_list;
2047 BT_DBG("conn %p", conn);
2049 read_lock(&l->lock);
2051 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2052 bh_lock_sock(sk);
2054 if (sk->sk_state != BT_CONNECT2) {
2055 bh_unlock_sock(sk);
2056 continue;
2059 if (!status) {
2060 sk->sk_state = BT_CONFIG;
2061 result = 0;
2062 } else {
2063 sk->sk_state = BT_DISCONN;
2064 l2cap_sock_set_timer(sk, HZ/10);
2065 result = L2CAP_CR_SEC_BLOCK;
2068 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2069 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2070 rsp.result = cpu_to_le16(result);
2071 rsp.status = cpu_to_le16(0);
2072 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2073 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2075 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2076 hci_conn_change_link_key(hcon);
2078 bh_unlock_sock(sk);
2081 read_unlock(&l->lock);
2082 return 0;
2085 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
/* Receive ACL data from HCI and reassemble fragmented L2CAP frames.
 * ACL_START fragments carry the L2CAP header (with total length);
 * continuation fragments are appended until conn->rx_len reaches zero,
 * then the complete frame is dispatched via l2cap_recv_frame. */
2087 struct l2cap_conn *conn = hcon->l2cap_data;
2089 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2090 goto drop;
2092 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2094 if (flags & ACL_START) {
2095 struct l2cap_hdr *hdr;
2096 int len;
/* A new start frame while reassembly is in progress means the
 * previous frame was truncated; discard the partial data. */
2098 if (conn->rx_len) {
2099 BT_ERR("Unexpected start frame (len %d)", skb->len);
2100 kfree_skb(conn->rx_skb);
2101 conn->rx_skb = NULL;
2102 conn->rx_len = 0;
2103 l2cap_conn_unreliable(conn, ECOMM);
/* Need at least the 2-byte length field of the L2CAP header. */
2106 if (skb->len < 2) {
2107 BT_ERR("Frame is too short (len %d)", skb->len);
2108 l2cap_conn_unreliable(conn, ECOMM);
2109 goto drop;
2112 hdr = (struct l2cap_hdr *) skb->data;
2113 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2115 if (len == skb->len) {
2116 /* Complete frame received */
2117 l2cap_recv_frame(conn, skb);
2118 return 0;
2121 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2123 if (skb->len > len) {
2124 BT_ERR("Frame is too long (len %d, expected len %d)",
2125 skb->len, len);
2126 l2cap_conn_unreliable(conn, ECOMM);
2127 goto drop;
2130 /* Allocate skb for the complete frame (with header) */
2131 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2132 goto drop;
2134 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2135 skb->len);
/* rx_len tracks how many bytes are still outstanding. */
2136 conn->rx_len = len - skb->len;
2137 } else {
2138 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2140 if (!conn->rx_len) {
2141 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2142 l2cap_conn_unreliable(conn, ECOMM);
2143 goto drop;
2146 if (skb->len > conn->rx_len) {
2147 BT_ERR("Fragment is too long (len %d, expected %d)",
2148 skb->len, conn->rx_len);
2149 kfree_skb(conn->rx_skb);
2150 conn->rx_skb = NULL;
2151 conn->rx_len = 0;
2152 l2cap_conn_unreliable(conn, ECOMM);
2153 goto drop;
2156 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2157 skb->len);
2158 conn->rx_len -= skb->len;
2160 if (!conn->rx_len) {
2161 /* Complete frame received */
2162 l2cap_recv_frame(conn, conn->rx_skb);
2163 conn->rx_skb = NULL;
/* The original fragment skb is always freed here; only the
 * reassembly buffer (rx_skb) survives across calls. */
2167 drop:
2168 kfree_skb(skb);
2169 return 0;
2172 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
/* sysfs "l2cap" class attribute: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and link mode.
 * NOTE(review): sprintf into buf is unbounded here — presumably total
 * output is expected to fit in the sysfs page buffer; confirm against
 * the sysfs buffer-size guarantee. */
2174 struct sock *sk;
2175 struct hlist_node *node;
2176 char *str = buf;
2178 read_lock_bh(&l2cap_sk_list.lock);
2180 sk_for_each(sk, node, &l2cap_sk_list.head) {
2181 struct l2cap_pinfo *pi = l2cap_pi(sk);
2183 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2184 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2185 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2186 pi->imtu, pi->omtu, pi->link_mode);
2189 read_unlock_bh(&l2cap_sk_list.lock);
2191 return (str - buf);
/* Read-only sysfs attribute backed by l2cap_sysfs_show. */
2194 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for SEQPACKET/RAW L2CAP sockets; generic bt_sock_*
 * helpers cover recvmsg and poll. */
2196 static const struct proto_ops l2cap_sock_ops = {
2197 .family = PF_BLUETOOTH,
2198 .owner = THIS_MODULE,
2199 .release = l2cap_sock_release,
2200 .bind = l2cap_sock_bind,
2201 .connect = l2cap_sock_connect,
2202 .listen = l2cap_sock_listen,
2203 .accept = l2cap_sock_accept,
2204 .getname = l2cap_sock_getname,
2205 .sendmsg = l2cap_sock_sendmsg,
2206 .recvmsg = bt_sock_recvmsg,
2207 .poll = bt_sock_poll,
2208 .mmap = sock_no_mmap,
2209 .socketpair = sock_no_socketpair,
2210 .ioctl = sock_no_ioctl,
2211 .shutdown = l2cap_sock_shutdown,
2212 .setsockopt = l2cap_sock_setsockopt,
2213 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook used by the Bluetooth socket layer to create
 * L2CAP sockets. */
2216 static struct net_proto_family l2cap_sock_family_ops = {
2217 .family = PF_BLUETOOTH,
2218 .owner = THIS_MODULE,
2219 .create = l2cap_sock_create,
/* HCI protocol registration: callbacks the HCI core invokes for ACL
 * connection events and incoming ACL data. */
2222 static struct hci_proto l2cap_hci_proto = {
2223 .name = "L2CAP",
2224 .id = HCI_PROTO_L2CAP,
2225 .connect_ind = l2cap_connect_ind,
2226 .connect_cfm = l2cap_connect_cfm,
2227 .disconn_ind = l2cap_disconn_ind,
2228 .auth_cfm = l2cap_auth_cfm,
2229 .encrypt_cfm = l2cap_encrypt_cfm,
2230 .recv_acldata = l2cap_recv_acldata
2233 static int __init l2cap_init(void)
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks, then (best-effort) create the sysfs info file. */
2235 int err;
2237 err = proto_register(&l2cap_proto, 0);
2238 if (err < 0)
2239 return err;
2241 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2242 if (err < 0) {
2243 BT_ERR("L2CAP socket registration failed");
2244 goto error;
2247 err = hci_register_proto(&l2cap_hci_proto);
2248 if (err < 0) {
2249 BT_ERR("L2CAP protocol registration failed");
2250 bt_sock_unregister(BTPROTO_L2CAP);
2251 goto error;
/* sysfs file creation failure is logged but not fatal. */
2254 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2255 BT_ERR("Failed to create L2CAP info file");
2257 BT_INFO("L2CAP ver %s", VERSION);
2258 BT_INFO("L2CAP socket layer initialized");
2260 return 0;
2262 error:
2263 proto_unregister(&l2cap_proto);
2264 return err;
2267 static void __exit l2cap_exit(void)
2269 class_remove_file(bt_class, &class_attr_l2cap);
2271 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2272 BT_ERR("L2CAP socket unregistration failed");
2274 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2275 BT_ERR("L2CAP protocol unregistration failed");
2277 proto_unregister(&l2cap_proto);
2280 void l2cap_load(void)
2282 /* Dummy function to trigger automatic L2CAP module loading by
2283 * other modules that use L2CAP sockets but don't use any other
2284 * symbols from it. */
2285 return;
/* Exported so dependent modules can reference it and pull this
 * module in via the symbol dependency. */
2287 EXPORT_SYMBOL(l2cap_load);
2289 module_init(l2cap_init);
2290 module_exit(l2cap_exit);
/* Module metadata. NOTE(review): "bt-proto-0" presumably maps to the
 * BTPROTO_L2CAP protocol number for automatic loading — confirm
 * against the bt_sock autoload convention. */
2292 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2293 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2294 MODULE_VERSION(VERSION);
2295 MODULE_LICENSE("GPL");
2296 MODULE_ALIAS("bt-proto-0");