net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.8"
60 static const struct proto_ops l2cap_sock_ops;
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = RW_LOCK_UNLOCKED
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
78 BT_DBG("sock %p state %d", sk, sk->sk_state);
80 bh_lock_sock(sk);
81 __l2cap_sock_close(sk, ETIMEDOUT);
82 bh_unlock_sock(sk);
84 l2cap_sock_kill(sk);
85 sock_put(sk);
88 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
90 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
91 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
94 static void l2cap_sock_clear_timer(struct sock *sk)
96 BT_DBG("sock %p state %d", sk, sk->sk_state);
97 sk_stop_timer(sk, &sk->sk_timer);
100 static void l2cap_sock_init_timer(struct sock *sk)
102 init_timer(&sk->sk_timer);
103 sk->sk_timer.function = l2cap_sock_timeout;
104 sk->sk_timer.data = (unsigned long)sk;
107 /* ---- L2CAP channels ---- */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
110 struct sock *s;
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
113 break;
115 return s;
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120 struct sock *s;
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
123 break;
125 return s;
128 /* Find channel with given SCID.
129 * Returns locked socket */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 struct sock *s;
133 read_lock(&l->lock);
134 s = __l2cap_get_chan_by_scid(l, cid);
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
137 return s;
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
142 struct sock *s;
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
145 break;
147 return s;
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 struct sock *s;
153 read_lock(&l->lock);
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
157 return s;
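/* Note: dynamically allocated channel IDs start at 0x0040; CIDs 0x0001
 * (signalling) and 0x0002 (connectionless) are reserved and are assigned
 * directly in __l2cap_chan_add() below. */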
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
162 u16 cid = 0x0040;
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
166 return cid;
169 return 0;
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
174 sock_hold(sk);
176 if (l->head)
177 l2cap_pi(l->head)->prev_c = sk;
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
181 l->head = sk;
184 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
188 write_lock_bh(&l->lock);
189 if (sk == l->head)
190 l->head = next;
192 if (next)
193 l2cap_pi(next)->prev_c = prev;
194 if (prev)
195 l2cap_pi(prev)->next_c = next;
196 write_unlock_bh(&l->lock);
198 __sock_put(sk);
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 struct l2cap_chan_list *l = &conn->chan_list;
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 l2cap_pi(sk)->conn = conn;
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
217 } else {
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 __l2cap_chan_link(l, sk);
226 if (parent)
227 bt_accept_enqueue(parent, sk);
230 /* Delete channel.
231 * Must be called on the locked socket. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
237 l2cap_sock_clear_timer(sk);
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
241 if (conn) {
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
245 hci_conn_put(conn->hcon);
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
251 if (err)
252 sk->sk_err = err;
254 if (parent) {
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
257 } else
258 sk->sk_state_change(sk);
261 /* ---- L2CAP connections ---- */
262 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
264 struct l2cap_conn *conn = hcon->l2cap_data;
266 if (conn || status)
267 return conn;
269 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
270 if (!conn)
271 return NULL;
273 hcon->l2cap_data = conn;
274 conn->hcon = hcon;
276 BT_DBG("hcon %p conn %p", hcon, conn);
278 conn->mtu = hcon->hdev->acl_mtu;
279 conn->src = &hcon->hdev->bdaddr;
280 conn->dst = &hcon->dst;
282 spin_lock_init(&conn->lock);
283 rwlock_init(&conn->chan_list.lock);
285 return conn;
288 static void l2cap_conn_del(struct hci_conn *hcon, int err)
290 struct l2cap_conn *conn = hcon->l2cap_data;
291 struct sock *sk;
293 if (!conn)
294 return;
296 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
298 if (conn->rx_skb)
299 kfree_skb(conn->rx_skb);
301 /* Kill channels */
302 while ((sk = conn->chan_list.head)) {
303 bh_lock_sock(sk);
304 l2cap_chan_del(sk, err);
305 bh_unlock_sock(sk);
306 l2cap_sock_kill(sk);
309 hcon->l2cap_data = NULL;
310 kfree(conn);
313 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
315 struct l2cap_chan_list *l = &conn->chan_list;
316 write_lock_bh(&l->lock);
317 __l2cap_chan_add(conn, sk, parent);
318 write_unlock_bh(&l->lock);
321 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
323 u8 id;
325 /* Get next available identifier.
326 * 1 - 128 are used by kernel.
327 * 129 - 199 are reserved.
328 * 200 - 254 are used by utilities like l2ping, etc. */
331 spin_lock_bh(&conn->lock);
333 if (++conn->tx_ident > 128)
334 conn->tx_ident = 1;
336 id = conn->tx_ident;
338 spin_unlock_bh(&conn->lock);
340 return id;
343 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
345 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
347 BT_DBG("code 0x%2.2x", code);
349 if (!skb)
350 return -ENOMEM;
352 return hci_send_acl(conn->hcon, skb, 0);
355 /* ---- Socket interface ---- */
356 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
358 struct sock *sk;
359 struct hlist_node *node;
360 sk_for_each(sk, node, &l2cap_sk_list.head)
361 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
362 goto found;
363 sk = NULL;
364 found:
365 return sk;
368 /* Find socket with psm and source bdaddr.
369 * Returns closest match. */
371 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
373 struct sock *sk = NULL, *sk1 = NULL;
374 struct hlist_node *node;
376 sk_for_each(sk, node, &l2cap_sk_list.head) {
377 if (state && sk->sk_state != state)
378 continue;
380 if (l2cap_pi(sk)->psm == psm) {
381 /* Exact match. */
382 if (!bacmp(&bt_sk(sk)->src, src))
383 break;
385 /* Closest match */
386 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
387 sk1 = sk;
390 return node ? sk : sk1;
393 /* Find socket with given address (psm, src).
394 * Returns locked socket */
395 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
397 struct sock *s;
398 read_lock(&l2cap_sk_list.lock);
399 s = __l2cap_get_sock_by_psm(state, psm, src);
400 if (s) bh_lock_sock(s);
401 read_unlock(&l2cap_sk_list.lock);
402 return s;
405 static void l2cap_sock_destruct(struct sock *sk)
407 BT_DBG("sk %p", sk);
409 skb_queue_purge(&sk->sk_receive_queue);
410 skb_queue_purge(&sk->sk_write_queue);
413 static void l2cap_sock_cleanup_listen(struct sock *parent)
415 struct sock *sk;
417 BT_DBG("parent %p", parent);
419 /* Close not yet accepted channels */
420 while ((sk = bt_accept_dequeue(parent, NULL)))
421 l2cap_sock_close(sk);
423 parent->sk_state = BT_CLOSED;
424 sock_set_flag(parent, SOCK_ZAPPED);
427 /* Kill socket (only if zapped and orphan)
428 * Must be called on unlocked socket. */
430 static void l2cap_sock_kill(struct sock *sk)
432 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
433 return;
435 BT_DBG("sk %p state %d", sk, sk->sk_state);
437 /* Kill poor orphan */
438 bt_sock_unlink(&l2cap_sk_list, sk);
439 sock_set_flag(sk, SOCK_DEAD);
440 sock_put(sk);
443 static void __l2cap_sock_close(struct sock *sk, int reason)
445 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
447 switch (sk->sk_state) {
448 case BT_LISTEN:
449 l2cap_sock_cleanup_listen(sk);
450 break;
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 case BT_CONNECT2:
455 if (sk->sk_type == SOCK_SEQPACKET) {
456 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
457 struct l2cap_disconn_req req;
459 sk->sk_state = BT_DISCONN;
460 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
462 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
463 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
464 l2cap_send_cmd(conn, l2cap_get_ident(conn),
465 L2CAP_DISCONN_REQ, sizeof(req), &req);
466 } else {
467 l2cap_chan_del(sk, reason);
469 break;
471 case BT_CONNECT:
472 case BT_DISCONN:
473 l2cap_chan_del(sk, reason);
474 break;
476 default:
477 sock_set_flag(sk, SOCK_ZAPPED);
478 break;
482 /* Must be called on unlocked socket. */
483 static void l2cap_sock_close(struct sock *sk)
485 l2cap_sock_clear_timer(sk);
486 lock_sock(sk);
487 __l2cap_sock_close(sk, ECONNRESET);
488 release_sock(sk);
489 l2cap_sock_kill(sk);
492 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
494 struct l2cap_pinfo *pi = l2cap_pi(sk);
496 BT_DBG("sk %p", sk);
498 if (parent) {
499 sk->sk_type = parent->sk_type;
500 pi->imtu = l2cap_pi(parent)->imtu;
501 pi->omtu = l2cap_pi(parent)->omtu;
502 pi->link_mode = l2cap_pi(parent)->link_mode;
503 } else {
504 pi->imtu = L2CAP_DEFAULT_MTU;
505 pi->omtu = 0;
506 pi->link_mode = 0;
509 /* Default config options */
510 pi->conf_mtu = L2CAP_DEFAULT_MTU;
511 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
514 static struct proto l2cap_proto = {
515 .name = "L2CAP",
516 .owner = THIS_MODULE,
517 .obj_size = sizeof(struct l2cap_pinfo)
520 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
522 struct sock *sk;
524 sk = sk_alloc(PF_BLUETOOTH, prio, &l2cap_proto, 1);
525 if (!sk)
526 return NULL;
528 sock_init_data(sock, sk);
529 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
531 sk->sk_destruct = l2cap_sock_destruct;
532 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
534 sock_reset_flag(sk, SOCK_ZAPPED);
536 sk->sk_protocol = proto;
537 sk->sk_state = BT_OPEN;
539 l2cap_sock_init_timer(sk);
541 bt_sock_link(&l2cap_sk_list, sk);
542 return sk;
545 static int l2cap_sock_create(struct socket *sock, int protocol)
547 struct sock *sk;
549 BT_DBG("sock %p", sock);
551 sock->state = SS_UNCONNECTED;
553 if (sock->type != SOCK_SEQPACKET &&
554 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
555 return -ESOCKTNOSUPPORT;
557 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
558 return -EPERM;
560 sock->ops = &l2cap_sock_ops;
562 sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);
563 if (!sk)
564 return -ENOMEM;
566 l2cap_sock_init(sk, NULL);
567 return 0;
570 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
572 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
573 struct sock *sk = sock->sk;
574 int err = 0;
576 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
578 if (!addr || addr->sa_family != AF_BLUETOOTH)
579 return -EINVAL;
581 lock_sock(sk);
583 if (sk->sk_state != BT_OPEN) {
584 err = -EBADFD;
585 goto done;
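/* PSM values below 0x1001 are reserved for well-known services, so
 * binding to one of them requires CAP_NET_BIND_SERVICE (checked below). */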
588 if (la->l2_psm > 0 && btohs(la->l2_psm) < 0x1001 &&
589 !capable(CAP_NET_BIND_SERVICE)) {
590 err = -EACCES;
591 goto done;
594 write_lock_bh(&l2cap_sk_list.lock);
596 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
597 err = -EADDRINUSE;
598 } else {
599 /* Save source address */
600 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
601 l2cap_pi(sk)->psm = la->l2_psm;
602 l2cap_pi(sk)->sport = la->l2_psm;
603 sk->sk_state = BT_BOUND;
606 write_unlock_bh(&l2cap_sk_list.lock);
608 done:
609 release_sock(sk);
610 return err;
613 static int l2cap_do_connect(struct sock *sk)
615 bdaddr_t *src = &bt_sk(sk)->src;
616 bdaddr_t *dst = &bt_sk(sk)->dst;
617 struct l2cap_conn *conn;
618 struct hci_conn *hcon;
619 struct hci_dev *hdev;
620 int err = 0;
622 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
624 if (!(hdev = hci_get_route(dst, src)))
625 return -EHOSTUNREACH;
627 hci_dev_lock_bh(hdev);
629 err = -ENOMEM;
631 hcon = hci_connect(hdev, ACL_LINK, dst);
632 if (!hcon)
633 goto done;
635 conn = l2cap_conn_add(hcon, 0);
636 if (!conn) {
637 hci_conn_put(hcon);
638 goto done;
641 err = 0;
643 /* Update source addr of the socket */
644 bacpy(src, conn->src);
646 l2cap_chan_add(conn, sk, NULL);
648 sk->sk_state = BT_CONNECT;
649 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
651 if (hcon->state == BT_CONNECTED) {
652 if (sk->sk_type == SOCK_SEQPACKET) {
653 struct l2cap_conn_req req;
654 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
655 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
656 req.psm = l2cap_pi(sk)->psm;
657 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
658 L2CAP_CONN_REQ, sizeof(req), &req);
659 } else {
660 l2cap_sock_clear_timer(sk);
661 sk->sk_state = BT_CONNECTED;
665 done:
666 hci_dev_unlock_bh(hdev);
667 hci_dev_put(hdev);
668 return err;
671 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
673 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
674 struct sock *sk = sock->sk;
675 int err = 0;
677 lock_sock(sk);
679 BT_DBG("sk %p", sk);
681 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
682 err = -EINVAL;
683 goto done;
686 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
687 err = -EINVAL;
688 goto done;
691 switch(sk->sk_state) {
692 case BT_CONNECT:
693 case BT_CONNECT2:
694 case BT_CONFIG:
695 /* Already connecting */
696 goto wait;
698 case BT_CONNECTED:
699 /* Already connected */
700 goto done;
702 case BT_OPEN:
703 case BT_BOUND:
704 /* Can connect */
705 break;
707 default:
708 err = -EBADFD;
709 goto done;
712 /* Set destination address and psm */
713 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
714 l2cap_pi(sk)->psm = la->l2_psm;
716 if ((err = l2cap_do_connect(sk)))
717 goto done;
719 wait:
720 err = bt_sock_wait_state(sk, BT_CONNECTED,
721 sock_sndtimeo(sk, flags & O_NONBLOCK));
722 done:
723 release_sock(sk);
724 return err;
727 static int l2cap_sock_listen(struct socket *sock, int backlog)
729 struct sock *sk = sock->sk;
730 int err = 0;
732 BT_DBG("sk %p backlog %d", sk, backlog);
734 lock_sock(sk);
736 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
737 err = -EBADFD;
738 goto done;
741 if (!l2cap_pi(sk)->psm) {
742 bdaddr_t *src = &bt_sk(sk)->src;
743 u16 psm;
745 err = -EINVAL;
747 write_lock_bh(&l2cap_sk_list.lock);
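/* No PSM given: autobind to the first free odd PSM in the dynamic range
 * (valid PSM values are odd, hence the += 2 step). */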
749 for (psm = 0x1001; psm < 0x1100; psm += 2)
750 if (!__l2cap_get_sock_by_addr(psm, src)) {
751 l2cap_pi(sk)->psm = htobs(psm);
752 l2cap_pi(sk)->sport = htobs(psm);
753 err = 0;
754 break;
757 write_unlock_bh(&l2cap_sk_list.lock);
759 if (err < 0)
760 goto done;
763 sk->sk_max_ack_backlog = backlog;
764 sk->sk_ack_backlog = 0;
765 sk->sk_state = BT_LISTEN;
767 done:
768 release_sock(sk);
769 return err;
772 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
774 DECLARE_WAITQUEUE(wait, current);
775 struct sock *sk = sock->sk, *nsk;
776 long timeo;
777 int err = 0;
779 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
781 if (sk->sk_state != BT_LISTEN) {
782 err = -EBADFD;
783 goto done;
786 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
788 BT_DBG("sk %p timeo %ld", sk, timeo);
790 /* Wait for an incoming connection. (wake-one). */
791 add_wait_queue_exclusive(sk->sk_sleep, &wait);
792 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
793 set_current_state(TASK_INTERRUPTIBLE);
794 if (!timeo) {
795 err = -EAGAIN;
796 break;
799 release_sock(sk);
800 timeo = schedule_timeout(timeo);
801 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
803 if (sk->sk_state != BT_LISTEN) {
804 err = -EBADFD;
805 break;
808 if (signal_pending(current)) {
809 err = sock_intr_errno(timeo);
810 break;
813 set_current_state(TASK_RUNNING);
814 remove_wait_queue(sk->sk_sleep, &wait);
816 if (err)
817 goto done;
819 newsock->state = SS_CONNECTED;
821 BT_DBG("new socket %p", nsk);
823 done:
824 release_sock(sk);
825 return err;
828 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
830 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
831 struct sock *sk = sock->sk;
833 BT_DBG("sock %p, sk %p", sock, sk);
835 addr->sa_family = AF_BLUETOOTH;
836 *len = sizeof(struct sockaddr_l2);
838 if (peer)
839 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
840 else
841 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
843 la->l2_psm = l2cap_pi(sk)->psm;
844 return 0;
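/* Build and send one outgoing L2CAP frame: the first skb carries the L2CAP
 * header (plus a 2-byte PSM prefix for connectionless sockets); any
 * remaining payload is chained as continuation fragments sized to the ACL
 * MTU and handed to the HCI layer in a single hci_send_acl() call. */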
847 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
849 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
850 struct sk_buff *skb, **frag;
851 int err, hlen, count, sent=0;
852 struct l2cap_hdr *lh;
854 BT_DBG("sk %p len %d", sk, len);
856 /* First fragment (with L2CAP header) */
857 if (sk->sk_type == SOCK_DGRAM)
858 hlen = L2CAP_HDR_SIZE + 2;
859 else
860 hlen = L2CAP_HDR_SIZE;
862 count = min_t(unsigned int, (conn->mtu - hlen), len);
864 skb = bt_skb_send_alloc(sk, hlen + count,
865 msg->msg_flags & MSG_DONTWAIT, &err);
866 if (!skb)
867 return err;
869 /* Create L2CAP header */
870 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
871 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
872 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
874 if (sk->sk_type == SOCK_DGRAM)
875 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
877 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
878 err = -EFAULT;
879 goto fail;
882 sent += count;
883 len -= count;
885 /* Continuation fragments (no L2CAP header) */
886 frag = &skb_shinfo(skb)->frag_list;
887 while (len) {
888 count = min_t(unsigned int, conn->mtu, len);
890 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
891 if (!*frag)
892 goto fail;
894 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
895 err = -EFAULT;
896 goto fail;
899 sent += count;
900 len -= count;
902 frag = &(*frag)->next;
905 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
906 goto fail;
908 return sent;
910 fail:
911 kfree_skb(skb);
912 return err;
915 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
917 struct sock *sk = sock->sk;
918 int err = 0;
920 BT_DBG("sock %p, sk %p", sock, sk);
922 err = sock_error(sk);
923 if (err)
924 return err;
926 if (msg->msg_flags & MSG_OOB)
927 return -EOPNOTSUPP;
929 /* Check outgoing MTU */
930 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
931 return -EINVAL;
933 lock_sock(sk);
935 if (sk->sk_state == BT_CONNECTED)
936 err = l2cap_do_send(sk, msg, len);
937 else
938 err = -ENOTCONN;
940 release_sock(sk);
941 return err;
944 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
946 struct sock *sk = sock->sk;
947 struct l2cap_options opts;
948 int err = 0, len;
949 u32 opt;
951 BT_DBG("sk %p", sk);
953 lock_sock(sk);
955 switch (optname) {
956 case L2CAP_OPTIONS:
957 len = min_t(unsigned int, sizeof(opts), optlen);
958 if (copy_from_user((char *) &opts, optval, len)) {
959 err = -EFAULT;
960 break;
962 l2cap_pi(sk)->imtu = opts.imtu;
963 l2cap_pi(sk)->omtu = opts.omtu;
964 break;
966 case L2CAP_LM:
967 if (get_user(opt, (u32 __user *) optval)) {
968 err = -EFAULT;
969 break;
972 l2cap_pi(sk)->link_mode = opt;
973 break;
975 default:
976 err = -ENOPROTOOPT;
977 break;
980 release_sock(sk);
981 return err;
984 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
986 struct sock *sk = sock->sk;
987 struct l2cap_options opts;
988 struct l2cap_conninfo cinfo;
989 int len, err = 0;
991 BT_DBG("sk %p", sk);
993 if (get_user(len, optlen))
994 return -EFAULT;
996 lock_sock(sk);
998 switch (optname) {
999 case L2CAP_OPTIONS:
1000 opts.imtu = l2cap_pi(sk)->imtu;
1001 opts.omtu = l2cap_pi(sk)->omtu;
1002 opts.flush_to = l2cap_pi(sk)->flush_to;
1003 opts.mode = 0x00;
1005 len = min_t(unsigned int, len, sizeof(opts));
1006 if (copy_to_user(optval, (char *) &opts, len))
1007 err = -EFAULT;
1009 break;
1011 case L2CAP_LM:
1012 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1013 err = -EFAULT;
1014 break;
1016 case L2CAP_CONNINFO:
1017 if (sk->sk_state != BT_CONNECTED) {
1018 err = -ENOTCONN;
1019 break;
1022 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1023 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1025 len = min_t(unsigned int, len, sizeof(cinfo));
1026 if (copy_to_user(optval, (char *) &cinfo, len))
1027 err = -EFAULT;
1029 break;
1031 default:
1032 err = -ENOPROTOOPT;
1033 break;
1036 release_sock(sk);
1037 return err;
1040 static int l2cap_sock_shutdown(struct socket *sock, int how)
1042 struct sock *sk = sock->sk;
1043 int err = 0;
1045 BT_DBG("sock %p, sk %p", sock, sk);
1047 if (!sk)
1048 return 0;
1050 lock_sock(sk);
1051 if (!sk->sk_shutdown) {
1052 sk->sk_shutdown = SHUTDOWN_MASK;
1053 l2cap_sock_clear_timer(sk);
1054 __l2cap_sock_close(sk, 0);
1056 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1057 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1059 release_sock(sk);
1060 return err;
1063 static int l2cap_sock_release(struct socket *sock)
1065 struct sock *sk = sock->sk;
1066 int err;
1068 BT_DBG("sock %p, sk %p", sock, sk);
1070 if (!sk)
1071 return 0;
1073 err = l2cap_sock_shutdown(sock, 2);
1075 sock_orphan(sk);
1076 l2cap_sock_kill(sk);
1077 return err;
1080 static void l2cap_conn_ready(struct l2cap_conn *conn)
1082 struct l2cap_chan_list *l = &conn->chan_list;
1083 struct sock *sk;
1085 BT_DBG("conn %p", conn);
1087 read_lock(&l->lock);
1089 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1090 bh_lock_sock(sk);
1092 if (sk->sk_type != SOCK_SEQPACKET) {
1093 l2cap_sock_clear_timer(sk);
1094 sk->sk_state = BT_CONNECTED;
1095 sk->sk_state_change(sk);
1096 } else if (sk->sk_state == BT_CONNECT) {
1097 struct l2cap_conn_req req;
1098 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
1099 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1100 req.psm = l2cap_pi(sk)->psm;
1101 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1104 bh_unlock_sock(sk);
1107 read_unlock(&l->lock);
1110 /* Notify sockets that we cannot guarantee reliability anymore */
1111 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1113 struct l2cap_chan_list *l = &conn->chan_list;
1114 struct sock *sk;
1116 BT_DBG("conn %p", conn);
1118 read_lock(&l->lock);
1119 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1120 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
1121 sk->sk_err = err;
1123 read_unlock(&l->lock);
1126 static void l2cap_chan_ready(struct sock *sk)
1128 struct sock *parent = bt_sk(sk)->parent;
1130 BT_DBG("sk %p, parent %p", sk, parent);
1132 l2cap_pi(sk)->conf_state = 0;
1133 l2cap_sock_clear_timer(sk);
1135 if (!parent) {
1136 /* Outgoing channel.
1137 * Wake up socket sleeping on connect. */
1139 sk->sk_state = BT_CONNECTED;
1140 sk->sk_state_change(sk);
1141 } else {
1142 /* Incoming channel.
1143 * Wake up socket sleeping on accept. */
1145 parent->sk_data_ready(parent, 0);
1149 /* Copy frame to all raw sockets on that connection */
1150 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1152 struct l2cap_chan_list *l = &conn->chan_list;
1153 struct sk_buff *nskb;
1154 struct sock * sk;
1156 BT_DBG("conn %p", conn);
1158 read_lock(&l->lock);
1159 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1160 if (sk->sk_type != SOCK_RAW)
1161 continue;
1163 /* Don't send frame to the socket it came from */
1164 if (skb->sk == sk)
1165 continue;
1167 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1168 continue;
1170 if (sock_queue_rcv_skb(sk, nskb))
1171 kfree_skb(nskb);
1173 read_unlock(&l->lock);
1176 /* ---- L2CAP signalling commands ---- */
1177 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1178 u8 code, u8 ident, u16 dlen, void *data)
1180 struct sk_buff *skb, **frag;
1181 struct l2cap_cmd_hdr *cmd;
1182 struct l2cap_hdr *lh;
1183 int len, count;
1185 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1187 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1188 count = min_t(unsigned int, conn->mtu, len);
1190 skb = bt_skb_alloc(count, GFP_ATOMIC);
1191 if (!skb)
1192 return NULL;
1194 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1195 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1196 lh->cid = __cpu_to_le16(0x0001);
1198 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1199 cmd->code = code;
1200 cmd->ident = ident;
1201 cmd->len = __cpu_to_le16(dlen);
1203 if (dlen) {
1204 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1205 memcpy(skb_put(skb, count), data, count);
1206 data += count;
1209 len -= skb->len;
1211 /* Continuation fragments (no L2CAP header) */
1212 frag = &skb_shinfo(skb)->frag_list;
1213 while (len) {
1214 count = min_t(unsigned int, conn->mtu, len);
1216 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1217 if (!*frag)
1218 goto fail;
1220 memcpy(skb_put(*frag, count), data, count);
1222 len -= count;
1223 data += count;
1225 frag = &(*frag)->next;
1228 return skb;
1230 fail:
1231 kfree_skb(skb);
1232 return NULL;
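/* Configuration options are encoded as type/length/value triplets; the
 * helpers below walk and build such option lists. */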
1235 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1237 struct l2cap_conf_opt *opt = *ptr;
1238 int len;
1240 len = L2CAP_CONF_OPT_SIZE + opt->len;
1241 *ptr += len;
1243 *type = opt->type;
1244 *olen = opt->len;
1246 switch (opt->len) {
1247 case 1:
1248 *val = *((u8 *) opt->val);
1249 break;
1251 case 2:
1252 *val = __le16_to_cpu(*((u16 *)opt->val));
1253 break;
1255 case 4:
1256 *val = __le32_to_cpu(*((u32 *)opt->val));
1257 break;
1259 default:
1260 *val = (unsigned long) opt->val;
1261 break;
1264 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1265 return len;
1268 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1270 int type, hint, olen;
1271 unsigned long val;
1272 void *ptr = data;
1274 BT_DBG("sk %p len %d", sk, len);
1276 while (len >= L2CAP_CONF_OPT_SIZE) {
1277 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1279 hint = type & 0x80;
1280 type &= 0x7f;
1282 switch (type) {
1283 case L2CAP_CONF_MTU:
1284 l2cap_pi(sk)->conf_mtu = val;
1285 break;
1287 case L2CAP_CONF_FLUSH_TO:
1288 l2cap_pi(sk)->flush_to = val;
1289 break;
1291 case L2CAP_CONF_QOS:
1292 break;
1294 default:
1295 if (hint)
1296 break;
1298 /* FIXME: Reject unknown option */
1299 break;
1304 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1306 struct l2cap_conf_opt *opt = *ptr;
1308 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1310 opt->type = type;
1311 opt->len = len;
1313 switch (len) {
1314 case 1:
1315 *((u8 *) opt->val) = val;
1316 break;
1318 case 2:
1319 *((u16 *) opt->val) = __cpu_to_le16(val);
1320 break;
1322 case 4:
1323 *((u32 *) opt->val) = __cpu_to_le32(val);
1324 break;
1326 default:
1327 memcpy(opt->val, (void *) val, len);
1328 break;
1331 *ptr += L2CAP_CONF_OPT_SIZE + len;
1334 static int l2cap_build_conf_req(struct sock *sk, void *data)
1336 struct l2cap_pinfo *pi = l2cap_pi(sk);
1337 struct l2cap_conf_req *req = data;
1338 void *ptr = req->data;
1340 BT_DBG("sk %p", sk);
1342 if (pi->imtu != L2CAP_DEFAULT_MTU)
1343 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1345 /* FIXME: Need actual value of the flush timeout */
1346 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1347 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1349 req->dcid = __cpu_to_le16(pi->dcid);
1350 req->flags = __cpu_to_le16(0);
1352 return ptr - data;
1355 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1357 struct l2cap_pinfo *pi = l2cap_pi(sk);
1358 int result = 0;
1360 /* Configure output options and let the other side know
1361 * which ones we don't like. */
1362 if (pi->conf_mtu < pi->omtu)
1363 result = L2CAP_CONF_UNACCEPT;
1364 else
1365 pi->omtu = pi->conf_mtu;
1367 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1369 BT_DBG("sk %p result %d", sk, result);
1370 return result;
1373 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1375 struct l2cap_conf_rsp *rsp = data;
1376 void *ptr = rsp->data;
1377 u16 flags = 0;
1379 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1381 if (result)
1382 *result = l2cap_conf_output(sk, &ptr);
1383 else
1384 flags = 0x0001;
1386 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1387 rsp->result = __cpu_to_le16(result ? *result : 0);
1388 rsp->flags = __cpu_to_le16(flags);
1390 return ptr - data;
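/* Handle an incoming Connection Request: look up a listening socket for the
 * requested PSM and create a child socket. If the link mode requires
 * authentication or encryption, the response stays pending (L2CAP_CR_PEND)
 * until the HCI security callbacks complete; otherwise the channel moves
 * straight to BT_CONFIG and a success response is sent. */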
1393 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1395 struct l2cap_chan_list *list = &conn->chan_list;
1396 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1397 struct l2cap_conn_rsp rsp;
1398 struct sock *sk, *parent;
1399 int result = 0, status = 0;
1401 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1402 u16 psm = req->psm;
1404 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1406 /* Check if we have socket listening on psm */
1407 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1408 if (!parent) {
1409 result = L2CAP_CR_BAD_PSM;
1410 goto sendresp;
1413 result = L2CAP_CR_NO_MEM;
1415 /* Check for backlog size */
1416 if (sk_acceptq_is_full(parent)) {
1417 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1418 goto response;
1421 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1422 if (!sk)
1423 goto response;
1425 write_lock_bh(&list->lock);
1427 /* Check if we already have channel with that dcid */
1428 if (__l2cap_get_chan_by_dcid(list, scid)) {
1429 write_unlock_bh(&list->lock);
1430 sock_set_flag(sk, SOCK_ZAPPED);
1431 l2cap_sock_kill(sk);
1432 goto response;
1435 hci_conn_hold(conn->hcon);
1437 l2cap_sock_init(sk, parent);
1438 bacpy(&bt_sk(sk)->src, conn->src);
1439 bacpy(&bt_sk(sk)->dst, conn->dst);
1440 l2cap_pi(sk)->psm = psm;
1441 l2cap_pi(sk)->dcid = scid;
1443 __l2cap_chan_add(conn, sk, parent);
1444 dcid = l2cap_pi(sk)->scid;
1446 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1448 /* Service level security */
1449 result = L2CAP_CR_PEND;
1450 status = L2CAP_CS_AUTHEN_PEND;
1451 sk->sk_state = BT_CONNECT2;
1452 l2cap_pi(sk)->ident = cmd->ident;
1454 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1455 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1456 if (!hci_conn_encrypt(conn->hcon))
1457 goto done;
1458 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1459 if (!hci_conn_auth(conn->hcon))
1460 goto done;
1463 sk->sk_state = BT_CONFIG;
1464 result = status = 0;
1466 done:
1467 write_unlock_bh(&list->lock);
1469 response:
1470 bh_unlock_sock(parent);
1472 sendresp:
1473 rsp.scid = __cpu_to_le16(scid);
1474 rsp.dcid = __cpu_to_le16(dcid);
1475 rsp.result = __cpu_to_le16(result);
1476 rsp.status = __cpu_to_le16(status);
1477 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1478 return 0;
1481 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1483 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1484 u16 scid, dcid, result, status;
1485 struct sock *sk;
1486 u8 req[128];
1488 scid = __le16_to_cpu(rsp->scid);
1489 dcid = __le16_to_cpu(rsp->dcid);
1490 result = __le16_to_cpu(rsp->result);
1491 status = __le16_to_cpu(rsp->status);
1493 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1495 if (scid) {
1496 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1497 return 0;
1498 } else {
1499 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1500 return 0;
1503 switch (result) {
1504 case L2CAP_CR_SUCCESS:
1505 sk->sk_state = BT_CONFIG;
1506 l2cap_pi(sk)->ident = 0;
1507 l2cap_pi(sk)->dcid = dcid;
1508 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1510 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1511 l2cap_build_conf_req(sk, req), req);
1512 break;
1514 case L2CAP_CR_PEND:
1515 break;
1517 default:
1518 l2cap_chan_del(sk, ECONNREFUSED);
1519 break;
1522 bh_unlock_sock(sk);
1523 return 0;
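/* Configuration is negotiated independently for each direction: the remote
 * request is answered here (output path), and the channel only becomes
 * BT_CONNECTED once both L2CAP_CONF_INPUT_DONE and L2CAP_CONF_OUTPUT_DONE
 * are set in conf_state. */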
1526 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1528 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1529 u16 dcid, flags;
1530 u8 rsp[64];
1531 struct sock *sk;
1532 int result;
1534 dcid = __le16_to_cpu(req->dcid);
1535 flags = __le16_to_cpu(req->flags);
1537 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1539 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1540 return -ENOENT;
1542 if (sk->sk_state == BT_DISCONN)
1543 goto unlock;
1545 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1547 if (flags & 0x0001) {
1548 /* Incomplete config. Send empty response. */
1549 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1550 l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1551 goto unlock;
1554 /* Complete config. */
1555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1556 l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1558 if (result)
1559 goto unlock;
1561 /* Output config done */
1562 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1564 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1565 sk->sk_state = BT_CONNECTED;
1566 l2cap_chan_ready(sk);
1567 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1568 u8 req[64];
1569 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1570 l2cap_build_conf_req(sk, req), req);
1573 unlock:
1574 bh_unlock_sock(sk);
1575 return 0;
1578 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1580 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1581 u16 scid, flags, result;
1582 struct sock *sk;
1584 scid = __le16_to_cpu(rsp->scid);
1585 flags = __le16_to_cpu(rsp->flags);
1586 result = __le16_to_cpu(rsp->result);
1588 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1590 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1591 return 0;
1593 switch (result) {
1594 case L2CAP_CONF_SUCCESS:
1595 break;
1597 case L2CAP_CONF_UNACCEPT:
1598 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1599 char req[128];
1600 /* It does not make sense to adjust L2CAP parameters
1601 * that are currently defined in the spec. We simply
1602 * resend config request that we sent earlier. It is
1603 * stupid, but it helps qualification testing which
1604 * expects at least some response from us. */
1605 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1606 l2cap_build_conf_req(sk, req), req);
1607 goto done;
1610 default:
1611 sk->sk_state = BT_DISCONN;
1612 sk->sk_err = ECONNRESET;
1613 l2cap_sock_set_timer(sk, HZ * 5);
1615 struct l2cap_disconn_req req;
1616 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1617 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1618 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1619 L2CAP_DISCONN_REQ, sizeof(req), &req);
1621 goto done;
1624 if (flags & 0x01)
1625 goto done;
1627 /* Input config done */
1628 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1630 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1631 sk->sk_state = BT_CONNECTED;
1632 l2cap_chan_ready(sk);
1635 done:
1636 bh_unlock_sock(sk);
1637 return 0;
1640 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1642 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1643 struct l2cap_disconn_rsp rsp;
1644 u16 dcid, scid;
1645 struct sock *sk;
1647 scid = __le16_to_cpu(req->scid);
1648 dcid = __le16_to_cpu(req->dcid);
1650 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1652 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1653 return 0;
1655 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1656 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1657 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1659 sk->sk_shutdown = SHUTDOWN_MASK;
1661 l2cap_chan_del(sk, ECONNRESET);
1662 bh_unlock_sock(sk);
1664 l2cap_sock_kill(sk);
1665 return 0;
1668 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1670 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1671 u16 dcid, scid;
1672 struct sock *sk;
1674 scid = __le16_to_cpu(rsp->scid);
1675 dcid = __le16_to_cpu(rsp->dcid);
1677 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1679 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1680 return 0;
1682 l2cap_chan_del(sk, 0);
1683 bh_unlock_sock(sk);
1685 l2cap_sock_kill(sk);
1686 return 0;
1689 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1691 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1692 struct l2cap_info_rsp rsp;
1693 u16 type;
1695 type = __le16_to_cpu(req->type);
1697 BT_DBG("type 0x%4.4x", type);
1699 rsp.type = __cpu_to_le16(type);
1700 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1701 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1703 return 0;
1706 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1708 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1709 u16 type, result;
1711 type = __le16_to_cpu(rsp->type);
1712 result = __le16_to_cpu(rsp->result);
1714 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1716 return 0;
1719 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1721 u8 *data = skb->data;
1722 int len = skb->len;
1723 struct l2cap_cmd_hdr cmd;
1724 int err = 0;
1726 l2cap_raw_recv(conn, skb);
1728 while (len >= L2CAP_CMD_HDR_SIZE) {
1729 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1730 data += L2CAP_CMD_HDR_SIZE;
1731 len -= L2CAP_CMD_HDR_SIZE;
1733 cmd.len = __le16_to_cpu(cmd.len);
1735 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1737 if (cmd.len > len || !cmd.ident) {
1738 BT_DBG("corrupted command");
1739 break;
1742 switch (cmd.code) {
1743 case L2CAP_COMMAND_REJ:
1744 /* FIXME: We should process this */
1745 break;
1747 case L2CAP_CONN_REQ:
1748 err = l2cap_connect_req(conn, &cmd, data);
1749 break;
1751 case L2CAP_CONN_RSP:
1752 err = l2cap_connect_rsp(conn, &cmd, data);
1753 break;
1755 case L2CAP_CONF_REQ:
1756 err = l2cap_config_req(conn, &cmd, data);
1757 break;
1759 case L2CAP_CONF_RSP:
1760 err = l2cap_config_rsp(conn, &cmd, data);
1761 break;
1763 case L2CAP_DISCONN_REQ:
1764 err = l2cap_disconnect_req(conn, &cmd, data);
1765 break;
1767 case L2CAP_DISCONN_RSP:
1768 err = l2cap_disconnect_rsp(conn, &cmd, data);
1769 break;
1771 case L2CAP_ECHO_REQ:
1772 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1773 break;
1775 case L2CAP_ECHO_RSP:
1776 break;
1778 case L2CAP_INFO_REQ:
1779 err = l2cap_information_req(conn, &cmd, data);
1780 break;
1782 case L2CAP_INFO_RSP:
1783 err = l2cap_information_rsp(conn, &cmd, data);
1784 break;
1786 default:
1787 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1788 err = -EINVAL;
1789 break;
1792 if (err) {
1793 struct l2cap_cmd_rej rej;
1794 BT_DBG("error %d", err);
1796 /* FIXME: Map err to a valid reason */
1797 rej.reason = __cpu_to_le16(0);
1798 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1801 data += cmd.len;
1802 len -= cmd.len;
1805 kfree_skb(skb);
1808 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1810 struct sock *sk;
1812 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1813 if (!sk) {
1814 BT_DBG("unknown cid 0x%4.4x", cid);
1815 goto drop;
1818 BT_DBG("sk %p, len %d", sk, skb->len);
1820 if (sk->sk_state != BT_CONNECTED)
1821 goto drop;
1823 if (l2cap_pi(sk)->imtu < skb->len)
1824 goto drop;
1826 /* If the socket receive buffer overflows we drop data here,
1827 * which is *bad* because L2CAP has to be reliable.
1828 * But we don't have any other choice: L2CAP doesn't
1829 * provide a flow control mechanism. */
1831 if (!sock_queue_rcv_skb(sk, skb))
1832 goto done;
1834 drop:
1835 kfree_skb(skb);
1837 done:
1838 if (sk)
1839 bh_unlock_sock(sk);
1841 return 0;
1844 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1846 struct sock *sk;
1848 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1849 if (!sk)
1850 goto drop;
1852 BT_DBG("sk %p, len %d", sk, skb->len);
1854 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1855 goto drop;
1857 if (l2cap_pi(sk)->imtu < skb->len)
1858 goto drop;
1860 if (!sock_queue_rcv_skb(sk, skb))
1861 goto done;
1863 drop:
1864 kfree_skb(skb);
1866 done:
1867 if (sk) bh_unlock_sock(sk);
1868 return 0;
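/* Demultiplex a complete L2CAP frame by CID: 0x0001 carries signalling
 * commands, 0x0002 connectionless data (prefixed with a 2-byte PSM), and
 * any other CID is a connection-oriented data channel. */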
1871 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1873 struct l2cap_hdr *lh = (void *) skb->data;
1874 u16 cid, psm, len;
1876 skb_pull(skb, L2CAP_HDR_SIZE);
1877 cid = __le16_to_cpu(lh->cid);
1878 len = __le16_to_cpu(lh->len);
1880 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1882 switch (cid) {
1883 case 0x0001:
1884 l2cap_sig_channel(conn, skb);
1885 break;
1887 case 0x0002:
1888 psm = get_unaligned((u16 *) skb->data);
1889 skb_pull(skb, 2);
1890 l2cap_conless_channel(conn, psm, skb);
1891 break;
1893 default:
1894 l2cap_data_channel(conn, cid, skb);
1895 break;
1899 /* ---- L2CAP interface with lower layer (HCI) ---- */
1901 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1903 int exact = 0, lm1 = 0, lm2 = 0;
1904 register struct sock *sk;
1905 struct hlist_node *node;
1907 if (type != ACL_LINK)
1908 return 0;
1910 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1912 /* Find listening sockets and check their link_mode */
1913 read_lock(&l2cap_sk_list.lock);
1914 sk_for_each(sk, node, &l2cap_sk_list.head) {
1915 if (sk->sk_state != BT_LISTEN)
1916 continue;
1918 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1919 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1920 exact++;
1921 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1922 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1924 read_unlock(&l2cap_sk_list.lock);
1926 return exact ? lm1 : lm2;
1929 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1931 struct l2cap_conn *conn;
1933 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1935 if (hcon->type != ACL_LINK)
1936 return 0;
1938 if (!status) {
1939 conn = l2cap_conn_add(hcon, status);
1940 if (conn)
1941 l2cap_conn_ready(conn);
1942 } else
1943 l2cap_conn_del(hcon, bt_err(status));
1945 return 0;
1948 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1950 BT_DBG("hcon %p reason %d", hcon, reason);
1952 if (hcon->type != ACL_LINK)
1953 return 0;
1955 l2cap_conn_del(hcon, bt_err(reason));
1957 return 0;
1960 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1962 struct l2cap_chan_list *l;
1963 struct l2cap_conn *conn = hcon->l2cap_data;
1964 struct l2cap_conn_rsp rsp;
1965 struct sock *sk;
1966 int result;
1968 if (!conn)
1969 return 0;
1971 l = &conn->chan_list;
1973 BT_DBG("conn %p", conn);
1975 read_lock(&l->lock);
1977 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1978 bh_lock_sock(sk);
1980 if (sk->sk_state != BT_CONNECT2 ||
1981 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1982 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1983 bh_unlock_sock(sk);
1984 continue;
1987 if (!status) {
1988 sk->sk_state = BT_CONFIG;
1989 result = 0;
1990 } else {
1991 sk->sk_state = BT_DISCONN;
1992 l2cap_sock_set_timer(sk, HZ/10);
1993 result = L2CAP_CR_SEC_BLOCK;
1996 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1997 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1998 rsp.result = __cpu_to_le16(result);
1999 rsp.status = __cpu_to_le16(0);
2000 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2001 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2003 bh_unlock_sock(sk);
2006 read_unlock(&l->lock);
2007 return 0;
2010 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2012 struct l2cap_chan_list *l;
2013 struct l2cap_conn *conn = hcon->l2cap_data;
2014 struct l2cap_conn_rsp rsp;
2015 struct sock *sk;
2016 int result;
2018 if (!conn)
2019 return 0;
2021 l = &conn->chan_list;
2023 BT_DBG("conn %p", conn);
2025 read_lock(&l->lock);
2027 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2028 bh_lock_sock(sk);
2030 if (sk->sk_state != BT_CONNECT2) {
2031 bh_unlock_sock(sk);
2032 continue;
2035 if (!status) {
2036 sk->sk_state = BT_CONFIG;
2037 result = 0;
2038 } else {
2039 sk->sk_state = BT_DISCONN;
2040 l2cap_sock_set_timer(sk, HZ/10);
2041 result = L2CAP_CR_SEC_BLOCK;
2044 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
2045 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
2046 rsp.result = __cpu_to_le16(result);
2047 rsp.status = __cpu_to_le16(0);
2048 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2049 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2051 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2052 hci_conn_change_link_key(hcon);
2054 bh_unlock_sock(sk);
2057 read_unlock(&l->lock);
2058 return 0;
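/* Reassemble L2CAP frames from ACL fragments: an ACL_START fragment carries
 * the L2CAP header with the total frame length; continuation fragments are
 * appended to conn->rx_skb until conn->rx_len drops to zero. */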
2061 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2063 struct l2cap_conn *conn = hcon->l2cap_data;
2065 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2066 goto drop;
2068 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2070 if (flags & ACL_START) {
2071 struct l2cap_hdr *hdr;
2072 int len;
2074 if (conn->rx_len) {
2075 BT_ERR("Unexpected start frame (len %d)", skb->len);
2076 kfree_skb(conn->rx_skb);
2077 conn->rx_skb = NULL;
2078 conn->rx_len = 0;
2079 l2cap_conn_unreliable(conn, ECOMM);
2082 if (skb->len < 2) {
2083 BT_ERR("Frame is too short (len %d)", skb->len);
2084 l2cap_conn_unreliable(conn, ECOMM);
2085 goto drop;
2088 hdr = (struct l2cap_hdr *) skb->data;
2089 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2091 if (len == skb->len) {
2092 /* Complete frame received */
2093 l2cap_recv_frame(conn, skb);
2094 return 0;
2097 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2099 if (skb->len > len) {
2100 BT_ERR("Frame is too long (len %d, expected len %d)",
2101 skb->len, len);
2102 l2cap_conn_unreliable(conn, ECOMM);
2103 goto drop;
2106 /* Allocate skb for the complete frame (with header) */
2107 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2108 goto drop;
2110 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2111 conn->rx_len = len - skb->len;
2112 } else {
2113 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2115 if (!conn->rx_len) {
2116 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2117 l2cap_conn_unreliable(conn, ECOMM);
2118 goto drop;
2121 if (skb->len > conn->rx_len) {
2122 BT_ERR("Fragment is too long (len %d, expected %d)",
2123 skb->len, conn->rx_len);
2124 kfree_skb(conn->rx_skb);
2125 conn->rx_skb = NULL;
2126 conn->rx_len = 0;
2127 l2cap_conn_unreliable(conn, ECOMM);
2128 goto drop;
2131 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2132 conn->rx_len -= skb->len;
2134 if (!conn->rx_len) {
2135 /* Complete frame received */
2136 l2cap_recv_frame(conn, conn->rx_skb);
2137 conn->rx_skb = NULL;
2141 drop:
2142 kfree_skb(skb);
2143 return 0;
2146 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2148 struct sock *sk;
2149 struct hlist_node *node;
2150 char *str = buf;
2152 read_lock_bh(&l2cap_sk_list.lock);
2154 sk_for_each(sk, node, &l2cap_sk_list.head) {
2155 struct l2cap_pinfo *pi = l2cap_pi(sk);
2157 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2158 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2159 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2160 pi->imtu, pi->omtu, pi->link_mode);
2163 read_unlock_bh(&l2cap_sk_list.lock);
2165 return (str - buf);
2168 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2170 static const struct proto_ops l2cap_sock_ops = {
2171 .family = PF_BLUETOOTH,
2172 .owner = THIS_MODULE,
2173 .release = l2cap_sock_release,
2174 .bind = l2cap_sock_bind,
2175 .connect = l2cap_sock_connect,
2176 .listen = l2cap_sock_listen,
2177 .accept = l2cap_sock_accept,
2178 .getname = l2cap_sock_getname,
2179 .sendmsg = l2cap_sock_sendmsg,
2180 .recvmsg = bt_sock_recvmsg,
2181 .poll = bt_sock_poll,
2182 .mmap = sock_no_mmap,
2183 .socketpair = sock_no_socketpair,
2184 .ioctl = sock_no_ioctl,
2185 .shutdown = l2cap_sock_shutdown,
2186 .setsockopt = l2cap_sock_setsockopt,
2187 .getsockopt = l2cap_sock_getsockopt
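/* Illustrative userspace sketch (not part of this file): the operations
 * above back an AF_BLUETOOTH/BTPROTO_L2CAP socket, e.g.
 *
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	struct sockaddr_l2 addr = { 0 };
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);            (example PSM)
 *	bacpy(&addr.l2_bdaddr, &remote_bdaddr);    (remote_bdaddr: caller-supplied)
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 */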
2190 static struct net_proto_family l2cap_sock_family_ops = {
2191 .family = PF_BLUETOOTH,
2192 .owner = THIS_MODULE,
2193 .create = l2cap_sock_create,
2196 static struct hci_proto l2cap_hci_proto = {
2197 .name = "L2CAP",
2198 .id = HCI_PROTO_L2CAP,
2199 .connect_ind = l2cap_connect_ind,
2200 .connect_cfm = l2cap_connect_cfm,
2201 .disconn_ind = l2cap_disconn_ind,
2202 .auth_cfm = l2cap_auth_cfm,
2203 .encrypt_cfm = l2cap_encrypt_cfm,
2204 .recv_acldata = l2cap_recv_acldata
2207 static int __init l2cap_init(void)
2209 int err;
2211 err = proto_register(&l2cap_proto, 0);
2212 if (err < 0)
2213 return err;
2215 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2216 if (err < 0) {
2217 BT_ERR("L2CAP socket registration failed");
2218 goto error;
2221 err = hci_register_proto(&l2cap_hci_proto);
2222 if (err < 0) {
2223 BT_ERR("L2CAP protocol registration failed");
2224 bt_sock_unregister(BTPROTO_L2CAP);
2225 goto error;
2228 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2229 BT_ERR("Failed to create L2CAP info file");
2231 BT_INFO("L2CAP ver %s", VERSION);
2232 BT_INFO("L2CAP socket layer initialized");
2234 return 0;
2236 error:
2237 proto_unregister(&l2cap_proto);
2238 return err;
2241 static void __exit l2cap_exit(void)
2243 class_remove_file(bt_class, &class_attr_l2cap);
2245 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2246 BT_ERR("L2CAP socket unregistration failed");
2248 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2249 BT_ERR("L2CAP protocol unregistration failed");
2251 proto_unregister(&l2cap_proto);
2254 void l2cap_load(void)
2256 /* Dummy function to trigger automatic L2CAP module loading by
2257 * other modules that use L2CAP sockets but don't use any other
2258 * symbols from it. */
2259 return;
2261 EXPORT_SYMBOL(l2cap_load);
2263 module_init(l2cap_init);
2264 module_exit(l2cap_exit);
2266 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2267 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2268 MODULE_VERSION(VERSION);
2269 MODULE_LICENSE("GPL");
2270 MODULE_ALIAS("bt-proto-0");