radeonfb: remove warning with CONFIG_PM=n
[linux-2.6/kvm.git] / net / bluetooth / l2cap.c
blob6fbbae78b30452c8ee6e20742d011ebaba4acc11
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.9"
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
/* sk_timer callback: close the socket with ETIMEDOUT under the BH socket
 * lock, then reap it if zapped+orphaned and drop the timer's reference. */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
82 bh_lock_sock(sk);
83 __l2cap_sock_close(sk, ETIMEDOUT);
84 bh_unlock_sock(sk);
/* l2cap_sock_kill() must run on the unlocked socket (see its header). */
86 l2cap_sock_kill(sk);
87 sock_put(sk);
/* (Re)arm sk_timer to fire 'timeout' jiffies from now. */
90 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
92 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
93 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
96 static void l2cap_sock_clear_timer(struct sock *sk)
98 BT_DBG("sock %p state %d", sk, sk->sk_state);
99 sk_stop_timer(sk, &sk->sk_timer);
/* One-time setup of sk_timer; the sock pointer is smuggled via .data. */
102 static void l2cap_sock_init_timer(struct sock *sk)
104 init_timer(&sk->sk_timer);
105 sk->sk_timer.function = l2cap_sock_timeout;
106 sk->sk_timer.data = (unsigned long)sk;
109 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list by destination CID.
 * Caller must hold l->lock; returns NULL if not found. */
110 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->dcid == cid)
115 break;
117 return s;
/* Same scan keyed on the source (local) CID. Caller holds l->lock. */
120 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
122 struct sock *s;
123 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
124 if (l2cap_pi(s)->scid == cid)
125 break;
127 return s;
130 /* Find channel with given SCID.
131 * Returns locked socket */
132 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 struct sock *s;
135 read_lock(&l->lock);
136 s = __l2cap_get_chan_by_scid(l, cid);
/* bh_lock_sock() is taken while still holding the list read lock so the
 * socket cannot be unlinked between lookup and lock. */
137 if (s) bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
/* Scan by pending signalling-command identifier. Caller holds l->lock. */
142 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
149 return s;
/* As above, but returns the socket bh-locked (same pattern as by_scid). */
152 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s) bh_lock_sock(s);
158 read_unlock(&l->lock);
159 return s;
/* Allocate the first free source CID in the dynamic range (0x0040 up);
 * returns 0 when the range is exhausted. O(n) per candidate — fine for
 * the small number of channels per ACL link. */
162 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 u16 cid = 0x0040;
166 for (; cid < 0xffff; cid++) {
167 if(!__l2cap_get_chan_by_scid(l, cid))
168 return cid;
171 return 0;
/* Push sk onto the head of the doubly linked channel list, taking a
 * reference. Caller must hold the list write lock. */
174 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
176 sock_hold(sk);
178 if (l->head)
179 l2cap_pi(l->head)->prev_c = sk;
181 l2cap_pi(sk)->next_c = l->head;
182 l2cap_pi(sk)->prev_c = NULL;
183 l->head = sk;
/* Remove sk from the channel list and drop the list's reference.
 * Takes the write lock itself (unlike __l2cap_chan_link). */
186 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
188 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
190 write_lock_bh(&l->lock);
191 if (sk == l->head)
192 l->head = next;
194 if (next)
195 l2cap_pi(next)->prev_c = prev;
196 if (prev)
197 l2cap_pi(prev)->next_c = next;
198 write_unlock_bh(&l->lock);
/* __sock_put: drop the reference taken by __l2cap_chan_link without
 * triggering destruction here. */
200 __sock_put(sk);
/* Attach sk to conn's channel list and assign its CIDs by socket type:
 * SEQPACKET gets a dynamically allocated SCID, DGRAM uses the fixed
 * connectionless CID 0x0002, anything else (raw) uses the signalling
 * CID 0x0001. Caller holds the channel-list write lock. */
203 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
205 struct l2cap_chan_list *l = &conn->chan_list;
207 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
209 l2cap_pi(sk)->conn = conn;
211 if (sk->sk_type == SOCK_SEQPACKET) {
212 /* Alloc CID for connection-oriented socket */
213 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
214 } else if (sk->sk_type == SOCK_DGRAM) {
215 /* Connectionless socket */
216 l2cap_pi(sk)->scid = 0x0002;
217 l2cap_pi(sk)->dcid = 0x0002;
218 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
219 } else {
220 /* Raw socket can send/recv signalling messages only */
221 l2cap_pi(sk)->scid = 0x0001;
222 l2cap_pi(sk)->dcid = 0x0001;
223 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* NOTE(review): l2cap_alloc_cid() can return 0 on exhaustion; no error
 * path is visible here — presumably handled (or tolerated) by callers. */
226 __l2cap_chan_link(l, sk);
/* Queue the child on the listening parent's accept queue, if any. */
228 if (parent)
229 bt_accept_enqueue(parent, sk);
232 /* Delete channel.
233 * Must be called on the locked socket. */
234 static void l2cap_chan_del(struct sock *sk, int err)
236 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
237 struct sock *parent = bt_sk(sk)->parent;
239 l2cap_sock_clear_timer(sk);
241 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
243 if (conn) {
244 /* Unlink from channel list */
245 l2cap_chan_unlink(&conn->chan_list, sk);
246 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference held per attached channel. */
247 hci_conn_put(conn->hcon);
/* Mark closed/zapped so l2cap_sock_kill() may reap the socket later. */
250 sk->sk_state = BT_CLOSED;
251 sock_set_flag(sk, SOCK_ZAPPED);
253 if (err)
254 sk->sk_err = err;
/* Pending accept()ed-but-unaccepted child: detach from the parent and
 * wake the listener; otherwise wake whoever waits on this socket. */
256 if (parent) {
257 bt_accept_unlink(sk);
258 parent->sk_data_ready(parent, 0);
259 } else
260 sk->sk_state_change(sk);
/* Allocate the next signalling identifier for this connection,
 * wrapping within the kernel-reserved range 1..128. */
263 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
265 u8 id;
267 /* Get next available identificator.
268 * 1 - 128 are used by kernel.
269 * 129 - 199 are reserved.
270 * 200 - 254 are used by utilities like l2ping, etc.
273 spin_lock_bh(&conn->lock);
275 if (++conn->tx_ident > 128)
276 conn->tx_ident = 1;
278 id = conn->tx_ident;
280 spin_unlock_bh(&conn->lock);
282 return id;
/* Build an L2CAP signalling command skb and push it out on the ACL
 * link. Returns -ENOMEM on allocation failure, else hci_send_acl()'s
 * result. */
285 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
287 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
289 BT_DBG("code 0x%2.2x", code);
291 if (!skb)
292 return -ENOMEM;
294 return hci_send_acl(conn->hcon, skb, 0);
297 /* ---- L2CAP connections ---- */
/* Kick every channel on the connection forward: non-SEQPACKET sockets
 * go straight to BT_CONNECTED; SEQPACKET sockets in BT_CONNECT get an
 * L2CAP_CONN_REQ sent. Walks the list under the read lock, locking
 * each socket in turn. */
298 static void l2cap_conn_start(struct l2cap_conn *conn)
300 struct l2cap_chan_list *l = &conn->chan_list;
301 struct sock *sk;
303 BT_DBG("conn %p", conn);
305 read_lock(&l->lock);
307 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
308 bh_lock_sock(sk);
310 if (sk->sk_type != SOCK_SEQPACKET) {
311 l2cap_sock_clear_timer(sk);
312 sk->sk_state = BT_CONNECTED;
313 sk->sk_state_change(sk);
314 } else if (sk->sk_state == BT_CONNECT) {
315 struct l2cap_conn_req req;
316 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
317 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
318 req.psm = l2cap_pi(sk)->psm;
319 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
320 L2CAP_CONN_REQ, sizeof(req), &req);
323 bh_unlock_sock(sk);
326 read_unlock(&l->lock);
/* ACL link is up: before starting channels, query the remote feature
 * mask (L2CAP_INFO_REQ) if there are any channels or listening sockets;
 * l2cap_conn_start() runs later from the info response/timeout path. */
329 static void l2cap_conn_ready(struct l2cap_conn *conn)
331 BT_DBG("conn %p", conn);
333 if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
334 struct l2cap_info_req req;
336 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
338 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
339 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
341 mod_timer(&conn->info_timer,
342 jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
344 l2cap_send_cmd(conn, conn->info_ident,
345 L2CAP_INFO_REQ, sizeof(req), &req);
349 /* Notify sockets that we cannot guaranty reliability anymore */
350 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
352 struct l2cap_chan_list *l = &conn->chan_list;
353 struct sock *sk;
355 BT_DBG("conn %p", conn);
357 read_lock(&l->lock);
/* Only channels that asked for L2CAP_LM_RELIABLE are flagged. */
359 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
360 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
361 sk->sk_err = err;
364 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature-mask exchange and
 * start the channels anyway. */
367 static void l2cap_info_timeout(unsigned long arg)
369 struct l2cap_conn *conn = (void *) arg;
371 conn->info_ident = 0;
373 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection. Returns NULL on allocation failure, and the existing
 * conn (or NULL) unchanged when one exists or status != 0. */
376 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
378 struct l2cap_conn *conn = hcon->l2cap_data;
380 if (conn || status)
381 return conn;
/* GFP_ATOMIC: may be called from the HCI event (BH) path. */
383 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
384 if (!conn)
385 return NULL;
387 hcon->l2cap_data = conn;
388 conn->hcon = hcon;
390 BT_DBG("hcon %p conn %p", hcon, conn);
392 conn->mtu = hcon->hdev->acl_mtu;
393 conn->src = &hcon->hdev->bdaddr;
394 conn->dst = &hcon->dst;
396 conn->feat_mask = 0;
398 init_timer(&conn->info_timer);
399 conn->info_timer.function = l2cap_info_timeout;
400 conn->info_timer.data = (unsigned long) conn;
402 spin_lock_init(&conn->lock);
403 rwlock_init(&conn->chan_list.lock);
405 return conn;
/* Tear down the L2CAP connection: free any partially reassembled frame,
 * delete and kill every channel with 'err', then free the conn itself. */
408 static void l2cap_conn_del(struct hci_conn *hcon, int err)
410 struct l2cap_conn *conn = hcon->l2cap_data;
411 struct sock *sk;
413 if (!conn)
414 return;
416 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
418 if (conn->rx_skb)
419 kfree_skb(conn->rx_skb);
421 /* Kill channels */
422 while ((sk = conn->chan_list.head)) {
423 bh_lock_sock(sk);
424 l2cap_chan_del(sk, err);
425 bh_unlock_sock(sk);
426 l2cap_sock_kill(sk);
429 hcon->l2cap_data = NULL;
430 kfree(conn);
/* Locked wrapper around __l2cap_chan_add(): takes the channel-list
 * write lock for the duration of the add. */
433 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
435 struct l2cap_chan_list *l = &conn->chan_list;
436 write_lock_bh(&l->lock);
437 __l2cap_chan_add(conn, sk, parent);
438 write_unlock_bh(&l->lock);
441 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by bound PSM (sport)
 * and source bdaddr. Caller must hold l2cap_sk_list.lock. */
442 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
444 struct sock *sk;
445 struct hlist_node *node;
446 sk_for_each(sk, node, &l2cap_sk_list.head)
447 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
448 goto found;
449 sk = NULL;
450 found:
451 return sk;
454 /* Find socket with psm and source bdaddr.
455 * Returns closest match.
457 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
459 struct sock *sk = NULL, *sk1 = NULL;
460 struct hlist_node *node;
462 sk_for_each(sk, node, &l2cap_sk_list.head) {
463 if (state && sk->sk_state != state)
464 continue;
466 if (l2cap_pi(sk)->psm == psm) {
467 /* Exact match. */
468 if (!bacmp(&bt_sk(sk)->src, src))
469 break;
471 /* Closest match */
472 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
473 sk1 = sk;
/* node non-NULL means the loop broke on an exact match; otherwise
 * fall back to the BDADDR_ANY wildcard socket (may be NULL). */
476 return node ? sk : sk1;
479 /* Find socket with given address (psm, src).
480 * Returns locked socket */
481 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
483 struct sock *s;
484 read_lock(&l2cap_sk_list.lock);
485 s = __l2cap_get_sock_by_psm(state, psm, src);
486 if (s) bh_lock_sock(s);
487 read_unlock(&l2cap_sk_list.lock);
488 return s;
/* sk_destruct callback: drop any queued receive/transmit skbs. */
491 static void l2cap_sock_destruct(struct sock *sk)
493 BT_DBG("sk %p", sk);
495 skb_queue_purge(&sk->sk_receive_queue);
496 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
499 static void l2cap_sock_cleanup_listen(struct sock *parent)
501 struct sock *sk;
503 BT_DBG("parent %p", parent);
505 /* Close not yet accepted channels */
506 while ((sk = bt_accept_dequeue(parent, NULL)))
507 l2cap_sock_close(sk);
509 parent->sk_state = BT_CLOSED;
510 sock_set_flag(parent, SOCK_ZAPPED);
513 /* Kill socket (only if zapped and orphan)
514 * Must be called on unlocked socket.
516 static void l2cap_sock_kill(struct sock *sk)
518 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
519 return;
521 BT_DBG("sk %p state %d", sk, sk->sk_state);
523 /* Kill poor orphan */
524 bt_sock_unlink(&l2cap_sk_list, sk);
525 sock_set_flag(sk, SOCK_DEAD);
526 sock_put(sk);
/* State-machine part of close. Must be called with the socket locked.
 * Connected SEQPACKET channels start a graceful disconnect (DISCONN_REQ
 * with a timeout); everything else is torn down immediately. */
529 static void __l2cap_sock_close(struct sock *sk, int reason)
531 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
533 switch (sk->sk_state) {
534 case BT_LISTEN:
535 l2cap_sock_cleanup_listen(sk);
536 break;
538 case BT_CONNECTED:
539 case BT_CONFIG:
540 case BT_CONNECT2:
541 if (sk->sk_type == SOCK_SEQPACKET) {
542 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
543 struct l2cap_disconn_req req;
/* Wait in BT_DISCONN for the peer's response, bounded by sndtimeo. */
545 sk->sk_state = BT_DISCONN;
546 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
548 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
549 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
550 l2cap_send_cmd(conn, l2cap_get_ident(conn),
551 L2CAP_DISCONN_REQ, sizeof(req), &req);
552 } else {
553 l2cap_chan_del(sk, reason);
555 break;
557 case BT_CONNECT:
558 case BT_DISCONN:
559 l2cap_chan_del(sk, reason);
560 break;
562 default:
563 sock_set_flag(sk, SOCK_ZAPPED);
564 break;
568 /* Must be called on unlocked socket. */
/* Full close: stop the timer, run the state machine with ECONNRESET,
 * then reap the socket if it is now zapped and orphaned. */
569 static void l2cap_sock_close(struct sock *sk)
571 l2cap_sock_clear_timer(sk);
572 lock_sock(sk);
573 __l2cap_sock_close(sk, ECONNRESET);
574 release_sock(sk);
575 l2cap_sock_kill(sk);
/* Initialise per-socket L2CAP options. A child accepted from 'parent'
 * inherits its type, MTUs and link mode; a fresh socket gets defaults. */
578 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
580 struct l2cap_pinfo *pi = l2cap_pi(sk);
582 BT_DBG("sk %p", sk);
584 if (parent) {
585 sk->sk_type = parent->sk_type;
586 pi->imtu = l2cap_pi(parent)->imtu;
587 pi->omtu = l2cap_pi(parent)->omtu;
588 pi->link_mode = l2cap_pi(parent)->link_mode;
589 } else {
590 pi->imtu = L2CAP_DEFAULT_MTU;
/* omtu 0 until configuration negotiates the outgoing MTU. */
591 pi->omtu = 0;
592 pi->link_mode = 0;
595 /* Default config options */
596 pi->conf_len = 0;
597 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: sizes sk allocations to hold l2cap_pinfo. */
600 static struct proto l2cap_proto = {
601 .name = "L2CAP",
602 .owner = THIS_MODULE,
603 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP sock, link it into the
 * global socket list. Returns NULL on allocation failure. */
606 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
608 struct sock *sk;
610 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1);
611 if (!sk)
612 return NULL;
614 sock_init_data(sock, sk);
615 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
617 sk->sk_destruct = l2cap_sock_destruct;
/* Connection-setup timeout doubles as the disconnect timeout. */
618 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
620 sock_reset_flag(sk, SOCK_ZAPPED);
622 sk->sk_protocol = proto;
623 sk->sk_state = BT_OPEN;
625 l2cap_sock_init_timer(sk);
627 bt_sock_link(&l2cap_sk_list, sk);
628 return sk;
/* socket(2) backend: only SEQPACKET/DGRAM/RAW types are supported, and
 * RAW requires CAP_NET_RAW. */
631 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
633 struct sock *sk;
635 BT_DBG("sock %p", sock);
637 sock->state = SS_UNCONNECTED;
639 if (sock->type != SOCK_SEQPACKET &&
640 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
641 return -ESOCKTNOSUPPORT;
643 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
644 return -EPERM;
646 sock->ops = &l2cap_sock_ops;
648 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
649 if (!sk)
650 return -ENOMEM;
652 l2cap_sock_init(sk, NULL);
653 return 0;
/* bind(2): record source bdaddr and PSM. PSMs below 0x1001 are
 * privileged (CAP_NET_BIND_SERVICE); a PSM already bound on the same
 * source address yields -EADDRINUSE. PSM 0 defers assignment to
 * listen(). */
656 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
658 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
659 struct sock *sk = sock->sk;
660 int err = 0;
662 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
664 if (!addr || addr->sa_family != AF_BLUETOOTH)
665 return -EINVAL;
667 lock_sock(sk);
669 if (sk->sk_state != BT_OPEN) {
670 err = -EBADFD;
671 goto done;
674 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
675 !capable(CAP_NET_BIND_SERVICE)) {
676 err = -EACCES;
677 goto done;
/* Uniqueness check and state update are done atomically under the
 * global socket-list write lock. */
680 write_lock_bh(&l2cap_sk_list.lock);
682 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
683 err = -EADDRINUSE;
684 } else {
685 /* Save source address */
686 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
687 l2cap_pi(sk)->psm = la->l2_psm;
688 l2cap_pi(sk)->sport = la->l2_psm;
689 sk->sk_state = BT_BOUND;
692 write_unlock_bh(&l2cap_sk_list.lock);
694 done:
695 release_sock(sk);
696 return err;
/* Kick off an outgoing connection: route to an HCI device, create/reuse
 * the ACL link and its l2cap_conn, attach this channel and — if the ACL
 * is already up and the feature exchange is done — send CONN_REQ now.
 * Otherwise the request goes out later from l2cap_conn_start(). */
699 static int l2cap_do_connect(struct sock *sk)
701 bdaddr_t *src = &bt_sk(sk)->src;
702 bdaddr_t *dst = &bt_sk(sk)->dst;
703 struct l2cap_conn *conn;
704 struct hci_conn *hcon;
705 struct hci_dev *hdev;
706 int err = 0;
708 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
710 if (!(hdev = hci_get_route(dst, src)))
711 return -EHOSTUNREACH;
713 hci_dev_lock_bh(hdev);
715 err = -ENOMEM;
717 hcon = hci_connect(hdev, ACL_LINK, dst);
718 if (!hcon)
719 goto done;
721 conn = l2cap_conn_add(hcon, 0);
722 if (!conn) {
723 hci_conn_put(hcon);
724 goto done;
727 err = 0;
729 /* Update source addr of the socket */
730 bacpy(src, conn->src);
732 l2cap_chan_add(conn, sk, NULL);
734 sk->sk_state = BT_CONNECT;
735 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
737 if (hcon->state == BT_CONNECTED) {
/* Feature mask not yet requested: l2cap_conn_ready() starts the info
 * exchange and channel setup continues from its completion path. */
738 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
739 l2cap_conn_ready(conn);
740 goto done;
743 if (sk->sk_type == SOCK_SEQPACKET) {
744 struct l2cap_conn_req req;
745 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
746 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
747 req.psm = l2cap_pi(sk)->psm;
748 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
749 L2CAP_CONN_REQ, sizeof(req), &req);
750 } else {
/* DGRAM/RAW need no channel handshake — connected immediately. */
751 l2cap_sock_clear_timer(sk);
752 sk->sk_state = BT_CONNECTED;
756 done:
757 hci_dev_unlock_bh(hdev);
758 hci_dev_put(hdev);
759 return err;
/* connect(2): validate the address, start the connection if the state
 * allows it, then wait (honouring O_NONBLOCK) for BT_CONNECTED. */
762 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
764 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
765 struct sock *sk = sock->sk;
766 int err = 0;
768 lock_sock(sk);
770 BT_DBG("sk %p", sk);
772 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
773 err = -EINVAL;
774 goto done;
/* Connection-oriented sockets must name a PSM. */
777 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
778 err = -EINVAL;
779 goto done;
782 switch(sk->sk_state) {
783 case BT_CONNECT:
784 case BT_CONNECT2:
785 case BT_CONFIG:
786 /* Already connecting */
787 goto wait;
789 case BT_CONNECTED:
790 /* Already connected */
791 goto done;
793 case BT_OPEN:
794 case BT_BOUND:
795 /* Can connect */
796 break;
798 default:
799 err = -EBADFD;
800 goto done;
803 /* Set destination address and psm */
804 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
805 l2cap_pi(sk)->psm = la->l2_psm;
807 if ((err = l2cap_do_connect(sk)))
808 goto done;
810 wait:
811 err = bt_sock_wait_state(sk, BT_CONNECTED,
812 sock_sndtimeo(sk, flags & O_NONBLOCK));
813 done:
814 release_sock(sk);
815 return err;
/* listen(2): only bound SEQPACKET sockets may listen. If no PSM was
 * bound, auto-assign the first free odd PSM in 0x1001..0x10ff (valid
 * PSMs have an odd least-significant octet). */
818 static int l2cap_sock_listen(struct socket *sock, int backlog)
820 struct sock *sk = sock->sk;
821 int err = 0;
823 BT_DBG("sk %p backlog %d", sk, backlog);
825 lock_sock(sk);
827 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
828 err = -EBADFD;
829 goto done;
832 if (!l2cap_pi(sk)->psm) {
833 bdaddr_t *src = &bt_sk(sk)->src;
834 u16 psm;
/* -EINVAL unless the loop below finds a free PSM. */
836 err = -EINVAL;
838 write_lock_bh(&l2cap_sk_list.lock);
840 for (psm = 0x1001; psm < 0x1100; psm += 2)
841 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
842 l2cap_pi(sk)->psm = htobs(psm);
843 l2cap_pi(sk)->sport = htobs(psm);
844 err = 0;
845 break;
848 write_unlock_bh(&l2cap_sk_list.lock);
850 if (err < 0)
851 goto done;
854 sk->sk_max_ack_backlog = backlog;
855 sk->sk_ack_backlog = 0;
856 sk->sk_state = BT_LISTEN;
858 done:
859 release_sock(sk);
860 return err;
/* accept(2): sleep (wake-one, interruptible) on the listening socket
 * until a child appears on the accept queue, honouring O_NONBLOCK and
 * pending signals. The socket lock is dropped while sleeping. */
863 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
865 DECLARE_WAITQUEUE(wait, current);
866 struct sock *sk = sock->sk, *nsk;
867 long timeo;
868 int err = 0;
870 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
872 if (sk->sk_state != BT_LISTEN) {
873 err = -EBADFD;
874 goto done;
877 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
879 BT_DBG("sk %p timeo %ld", sk, timeo);
881 /* Wait for an incoming connection. (wake-one). */
882 add_wait_queue_exclusive(sk->sk_sleep, &wait);
883 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
884 set_current_state(TASK_INTERRUPTIBLE);
885 if (!timeo) {
886 err = -EAGAIN;
887 break;
890 release_sock(sk);
891 timeo = schedule_timeout(timeo);
892 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Re-check state: the listener may have been closed while we slept. */
894 if (sk->sk_state != BT_LISTEN) {
895 err = -EBADFD;
896 break;
899 if (signal_pending(current)) {
900 err = sock_intr_errno(timeo);
901 break;
904 set_current_state(TASK_RUNNING);
905 remove_wait_queue(sk->sk_sleep, &wait);
907 if (err)
908 goto done;
910 newsock->state = SS_CONNECTED;
912 BT_DBG("new socket %p", nsk);
914 done:
915 release_sock(sk);
916 return err;
/* getsockname(2)/getpeername(2): fill a sockaddr_l2 with the local or
 * remote bdaddr (selected by 'peer') plus the channel PSM. */
919 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
921 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
922 struct sock *sk = sock->sk;
924 BT_DBG("sock %p, sk %p", sock, sk);
926 addr->sa_family = AF_BLUETOOTH;
927 *len = sizeof(struct sockaddr_l2);
929 if (peer)
930 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
931 else
932 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
934 la->l2_psm = l2cap_pi(sk)->psm;
935 return 0;
/* Build and transmit one L2CAP frame from user iovec data. The first
 * skb carries the L2CAP header (plus a 2-byte PSM for connectionless
 * DGRAM frames); remaining data is chained as frag_list continuation
 * skbs sized to the ACL MTU. Returns bytes sent or a negative errno. */
938 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
940 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
941 struct sk_buff *skb, **frag;
942 int err, hlen, count, sent=0;
943 struct l2cap_hdr *lh;
945 BT_DBG("sk %p len %d", sk, len);
947 /* First fragment (with L2CAP header) */
948 if (sk->sk_type == SOCK_DGRAM)
949 hlen = L2CAP_HDR_SIZE + 2;
950 else
951 hlen = L2CAP_HDR_SIZE;
953 count = min_t(unsigned int, (conn->mtu - hlen), len);
/* May block for memory unless MSG_DONTWAIT was passed. */
955 skb = bt_skb_send_alloc(sk, hlen + count,
956 msg->msg_flags & MSG_DONTWAIT, &err);
957 if (!skb)
958 return err;
960 /* Create L2CAP header */
961 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
962 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
963 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
965 if (sk->sk_type == SOCK_DGRAM)
966 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
968 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
969 err = -EFAULT;
970 goto fail;
973 sent += count;
974 len -= count;
976 /* Continuation fragments (no L2CAP header) */
977 frag = &skb_shinfo(skb)->frag_list;
978 while (len) {
979 count = min_t(unsigned int, conn->mtu, len);
981 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
982 if (!*frag)
983 goto fail;
985 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
986 err = -EFAULT;
987 goto fail;
990 sent += count;
991 len -= count;
993 frag = &(*frag)->next;
996 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
997 goto fail;
999 return sent;
1001 fail:
/* Frees the head skb; chained fragments go with it. */
1002 kfree_skb(skb);
1003 return err;
/* sendmsg(2): reject MSG_OOB and over-MTU payloads, then hand off to
 * l2cap_do_send() if the channel is connected. */
1006 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1008 struct sock *sk = sock->sk;
1009 int err = 0;
1011 BT_DBG("sock %p, sk %p", sock, sk);
1013 err = sock_error(sk);
1014 if (err)
1015 return err;
1017 if (msg->msg_flags & MSG_OOB)
1018 return -EOPNOTSUPP;
1020 /* Check outgoing MTU */
1021 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1022 return -EINVAL;
1024 lock_sock(sk);
1026 if (sk->sk_state == BT_CONNECTED)
1027 err = l2cap_do_send(sk, msg, len);
1028 else
1029 err = -ENOTCONN;
1031 release_sock(sk);
1032 return err;
/* setsockopt(2): L2CAP_OPTIONS updates imtu/omtu (copy starts from the
 * current values so a short user buffer leaves the rest unchanged);
 * L2CAP_LM replaces the link-mode flags. */
1035 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1037 struct sock *sk = sock->sk;
1038 struct l2cap_options opts;
1039 int err = 0, len;
1040 u32 opt;
1042 BT_DBG("sk %p", sk);
1044 lock_sock(sk);
1046 switch (optname) {
1047 case L2CAP_OPTIONS:
1048 opts.imtu = l2cap_pi(sk)->imtu;
1049 opts.omtu = l2cap_pi(sk)->omtu;
1050 opts.flush_to = l2cap_pi(sk)->flush_to;
1051 opts.mode = L2CAP_MODE_BASIC;
1053 len = min_t(unsigned int, sizeof(opts), optlen);
1054 if (copy_from_user((char *) &opts, optval, len)) {
1055 err = -EFAULT;
1056 break;
/* Note: flush_to and mode from userspace are ignored here. */
1059 l2cap_pi(sk)->imtu = opts.imtu;
1060 l2cap_pi(sk)->omtu = opts.omtu;
1061 break;
1063 case L2CAP_LM:
1064 if (get_user(opt, (u32 __user *) optval)) {
1065 err = -EFAULT;
1066 break;
1069 l2cap_pi(sk)->link_mode = opt;
1070 break;
1072 default:
1073 err = -ENOPROTOOPT;
1074 break;
1077 release_sock(sk);
1078 return err;
/* getsockopt(2): return channel options, link mode, or (when connected)
 * HCI handle + remote device class via L2CAP_CONNINFO. Copies are
 * truncated to the user-supplied length. */
1081 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1083 struct sock *sk = sock->sk;
1084 struct l2cap_options opts;
1085 struct l2cap_conninfo cinfo;
1086 int len, err = 0;
1088 BT_DBG("sk %p", sk);
1090 if (get_user(len, optlen))
1091 return -EFAULT;
1093 lock_sock(sk);
1095 switch (optname) {
1096 case L2CAP_OPTIONS:
1097 opts.imtu = l2cap_pi(sk)->imtu;
1098 opts.omtu = l2cap_pi(sk)->omtu;
1099 opts.flush_to = l2cap_pi(sk)->flush_to;
1100 opts.mode = L2CAP_MODE_BASIC;
1102 len = min_t(unsigned int, len, sizeof(opts));
1103 if (copy_to_user(optval, (char *) &opts, len))
1104 err = -EFAULT;
1106 break;
1108 case L2CAP_LM:
1109 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1110 err = -EFAULT;
1111 break;
1113 case L2CAP_CONNINFO:
/* conn/hcon are only valid while connected. */
1114 if (sk->sk_state != BT_CONNECTED) {
1115 err = -ENOTCONN;
1116 break;
1119 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1120 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1122 len = min_t(unsigned int, len, sizeof(cinfo));
1123 if (copy_to_user(optval, (char *) &cinfo, len))
1124 err = -EFAULT;
1126 break;
1128 default:
1129 err = -ENOPROTOOPT;
1130 break;
1133 release_sock(sk);
1134 return err;
/* shutdown(2): close the channel (once) and, with SO_LINGER set, wait
 * up to lingertime for the BT_CLOSED state. */
1137 static int l2cap_sock_shutdown(struct socket *sock, int how)
1139 struct sock *sk = sock->sk;
1140 int err = 0;
1142 BT_DBG("sock %p, sk %p", sock, sk);
1144 if (!sk)
1145 return 0;
1147 lock_sock(sk);
1148 if (!sk->sk_shutdown) {
1149 sk->sk_shutdown = SHUTDOWN_MASK;
1150 l2cap_sock_clear_timer(sk);
1151 __l2cap_sock_close(sk, 0);
1153 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1154 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1156 release_sock(sk);
1157 return err;
/* release(2): shutdown both directions, detach from the struct socket,
 * then reap the sock if it is zapped and now orphaned. */
1160 static int l2cap_sock_release(struct socket *sock)
1162 struct sock *sk = sock->sk;
1163 int err;
1165 BT_DBG("sock %p, sk %p", sock, sk);
1167 if (!sk)
1168 return 0;
1170 err = l2cap_sock_shutdown(sock, 2);
1172 sock_orphan(sk);
1173 l2cap_sock_kill(sk);
1174 return err;
/* Channel configuration finished: clear config state and the setup
 * timer, then wake either the connecting or the accepting side. */
1177 static void l2cap_chan_ready(struct sock *sk)
1179 struct sock *parent = bt_sk(sk)->parent;
1181 BT_DBG("sk %p, parent %p", sk, parent);
1183 l2cap_pi(sk)->conf_state = 0;
1184 l2cap_sock_clear_timer(sk);
1186 if (!parent) {
1187 /* Outgoing channel.
1188 * Wake up socket sleeping on connect.
1190 sk->sk_state = BT_CONNECTED;
1191 sk->sk_state_change(sk);
1192 } else {
1193 /* Incoming channel.
1194 * Wake up socket sleeping on accept.
1196 parent->sk_data_ready(parent, 0);
1200 /* Copy frame to all raw sockets on that connection */
1201 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1203 struct l2cap_chan_list *l = &conn->chan_list;
1204 struct sk_buff *nskb;
1205 struct sock * sk;
1207 BT_DBG("conn %p", conn);
1209 read_lock(&l->lock);
1210 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1211 if (sk->sk_type != SOCK_RAW)
1212 continue;
1214 /* Don't send frame to the socket it came from */
1215 if (skb->sk == sk)
1216 continue;
/* Clone per recipient; on clone failure or full rcv queue the frame is
 * simply skipped/dropped for that socket (best effort). */
1218 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1219 continue;
1221 if (sock_queue_rcv_skb(sk, nskb))
1222 kfree_skb(nskb);
1224 read_unlock(&l->lock);
1227 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel (CID 0x0001) command skb: L2CAP header +
 * command header + dlen bytes of payload, fragmented across frag_list
 * continuation skbs when larger than the ACL MTU. Returns NULL on
 * allocation failure. */
1228 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1229 u8 code, u8 ident, u16 dlen, void *data)
1231 struct sk_buff *skb, **frag;
1232 struct l2cap_cmd_hdr *cmd;
1233 struct l2cap_hdr *lh;
1234 int len, count;
1236 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1238 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1239 count = min_t(unsigned int, conn->mtu, len);
1241 skb = bt_skb_alloc(count, GFP_ATOMIC);
1242 if (!skb)
1243 return NULL;
1245 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1246 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1247 lh->cid = cpu_to_le16(0x0001);
1249 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1250 cmd->code = code;
1251 cmd->ident = ident;
1252 cmd->len = cpu_to_le16(dlen);
1254 if (dlen) {
/* First fragment carries whatever payload fits after the headers. */
1255 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1256 memcpy(skb_put(skb, count), data, count);
1257 data += count;
1260 len -= skb->len;
1262 /* Continuation fragments (no L2CAP header) */
1263 frag = &skb_shinfo(skb)->frag_list;
1264 while (len) {
1265 count = min_t(unsigned int, conn->mtu, len);
1267 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1268 if (!*frag)
1269 goto fail;
1271 memcpy(skb_put(*frag, count), data, count);
1273 len -= count;
1274 data += count;
1276 frag = &(*frag)->next;
1279 return skb;
1281 fail:
/* Frees the head skb and any fragments already chained to it. */
1282 kfree_skb(skb);
1283 return NULL;
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte values are returned inline in *val (le16/le32 converted);
 * other lengths return a pointer to the raw value. Returns the total
 * size consumed. NOTE(review): opt->len is taken from the wire without
 * bounds checking against the remaining buffer — caller must have
 * validated lengths. */
1286 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1288 struct l2cap_conf_opt *opt = *ptr;
1289 int len;
1291 len = L2CAP_CONF_OPT_SIZE + opt->len;
1292 *ptr += len;
1294 *type = opt->type;
1295 *olen = opt->len;
1297 switch (opt->len) {
1298 case 1:
1299 *val = *((u8 *) opt->val);
1300 break;
1302 case 2:
1303 *val = __le16_to_cpu(*((__le16 *) opt->val));
1304 break;
1306 case 4:
1307 *val = __le32_to_cpu(*((__le32 *) opt->val));
1308 break;
1310 default:
1311 *val = (unsigned long) opt->val;
1312 break;
1315 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1316 return len;
/* Encode one configuration option at *ptr and advance it. The inverse
 * of l2cap_get_conf_opt(): 1/2/4-byte values are stored inline
 * (little-endian), other lengths copied from the pointer in 'val'. */
1319 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1321 struct l2cap_conf_opt *opt = *ptr;
1323 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1325 opt->type = type;
1326 opt->len = len;
1328 switch (len) {
1329 case 1:
1330 *((u8 *) opt->val) = val;
1331 break;
1333 case 2:
1334 *((__le16 *) opt->val) = cpu_to_le16(val);
1335 break;
1337 case 4:
1338 *((__le32 *) opt->val) = cpu_to_le32(val);
1339 break;
1341 default:
1342 memcpy(opt->val, (void *) val, len);
1343 break;
1346 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill a configuration request for our side of the channel into
 * 'data'; the MTU option is included only when imtu differs from the
 * default. Returns the number of bytes written. */
1349 static int l2cap_build_conf_req(struct sock *sk, void *data)
1351 struct l2cap_pinfo *pi = l2cap_pi(sk);
1352 struct l2cap_conf_req *req = data;
1353 void *ptr = req->data;
1355 BT_DBG("sk %p", sk);
1357 if (pi->imtu != L2CAP_DEFAULT_MTU)
1358 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1360 /* FIXME: Need actual value of the flush timeout */
1361 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1362 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1364 req->dcid = cpu_to_le16(pi->dcid);
1365 req->flags = cpu_to_le16(0);
1367 return ptr - data;
/* Parse the peer's buffered configuration request (pi->conf_req /
 * conf_len) and build our response into 'data'. Only Basic mode is
 * accepted; unknown non-hint options are echoed back with
 * L2CAP_CONF_UNKNOWN. Returns the response length. */
1370 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1372 struct l2cap_pinfo *pi = l2cap_pi(sk);
1373 struct l2cap_conf_rsp *rsp = data;
1374 void *ptr = rsp->data;
1375 void *req = pi->conf_req;
1376 int len = pi->conf_len;
1377 int type, hint, olen;
1378 unsigned long val;
1379 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1380 u16 mtu = L2CAP_DEFAULT_MTU;
1381 u16 result = L2CAP_CONF_SUCCESS;
1383 BT_DBG("sk %p", sk);
1385 while (len >= L2CAP_CONF_OPT_SIZE) {
1386 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Top bit marks a "hint": unknown hints may be silently ignored. */
1388 hint = type & 0x80;
1389 type &= 0x7f;
1391 switch (type) {
1392 case L2CAP_CONF_MTU:
1393 mtu = val;
1394 break;
1396 case L2CAP_CONF_FLUSH_TO:
1397 pi->flush_to = val;
1398 break;
1400 case L2CAP_CONF_QOS:
1401 break;
1403 case L2CAP_CONF_RFC:
1404 if (olen == sizeof(rfc))
1405 memcpy(&rfc, (void *) val, olen);
1406 break;
1408 default:
1409 if (hint)
1410 break;
1412 result = L2CAP_CONF_UNKNOWN;
1413 *((u8 *) ptr++) = type;
1414 break;
1418 if (result == L2CAP_CONF_SUCCESS) {
1419 /* Configure output options and let the other side know
1420 * which ones we don't like. */
1422 if (rfc.mode == L2CAP_MODE_BASIC) {
/* NOTE(review): rejects a peer MTU smaller than our current omtu;
 * whether omtu is initialised appropriately here depends on earlier
 * socket setup (omtu starts at 0 for fresh sockets) — verify. */
1423 if (mtu < pi->omtu)
1424 result = L2CAP_CONF_UNACCEPT;
1425 else {
1426 pi->omtu = mtu;
1427 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1431 } else {
/* Non-basic modes unsupported: propose Basic back in an RFC option. */
1432 result = L2CAP_CONF_UNACCEPT;
1434 memset(&rfc, 0, sizeof(rfc));
1435 rfc.mode = L2CAP_MODE_BASIC;
1437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1438 sizeof(rfc), (unsigned long) &rfc);
1442 rsp->scid = cpu_to_le16(pi->dcid);
1443 rsp->result = cpu_to_le16(result);
1444 rsp->flags = cpu_to_le16(0x0000);
1446 return ptr - data;
1449 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1451 struct l2cap_conf_rsp *rsp = data;
1452 void *ptr = rsp->data;
1454 BT_DBG("sk %p", sk);
1456 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1457 rsp->result = cpu_to_le16(result);
1458 rsp->flags = cpu_to_le16(flags);
1460 return ptr - data;
1463 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1465 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1467 if (rej->reason != 0x0000)
1468 return 0;
1470 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1471 cmd->ident == conn->info_ident) {
1472 conn->info_ident = 0;
1473 del_timer(&conn->info_timer);
1474 l2cap_conn_start(conn);
1477 return 0;
/* Handle an incoming Connect Request: find a listening socket for the
 * PSM, allocate and initialize a child socket, attach it to the
 * connection, and answer with a Connect Response (success, pending, or
 * an error such as bad PSM / no resources).
 * NOTE(review): l2cap_get_sock_by_psm() appears to return with 'parent'
 * bh-locked — the bh_unlock_sock(parent) at 'response:' pairs with it;
 * confirm in the helper. */
1480 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1482 struct l2cap_chan_list *list = &conn->chan_list;
1483 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1484 struct l2cap_conn_rsp rsp;
1485 struct sock *sk, *parent;
1486 int result = 0, status = 0;
1488 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1489 __le16 psm = req->psm;
1491 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1493 /* Check if we have socket listening on psm */
1494 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1495 if (!parent) {
1496 result = L2CAP_CR_BAD_PSM;
1497 goto sendresp;
/* From here on any failure is reported as "no resources". */
1500 result = L2CAP_CR_NO_MEM;
1502 /* Check for backlog size */
1503 if (sk_acceptq_is_full(parent)) {
1504 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1505 goto response;
1508 sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1509 if (!sk)
1510 goto response;
1512 write_lock_bh(&list->lock);
1514 /* Check if we already have channel with that dcid */
1515 if (__l2cap_get_chan_by_dcid(list, scid)) {
1516 write_unlock_bh(&list->lock);
1517 sock_set_flag(sk, SOCK_ZAPPED);
1518 l2cap_sock_kill(sk);
1519 goto response;
1522 hci_conn_hold(conn->hcon);
1524 l2cap_sock_init(sk, parent);
1525 bacpy(&bt_sk(sk)->src, conn->src);
1526 bacpy(&bt_sk(sk)->dst, conn->dst);
1527 l2cap_pi(sk)->psm = psm;
1528 l2cap_pi(sk)->dcid = scid;
1530 __l2cap_chan_add(conn, sk, parent);
/* Our local CID becomes the peer's destination CID in the response. */
1531 dcid = l2cap_pi(sk)->scid;
1533 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1535 /* Service level security */
1536 result = L2CAP_CR_PEND;
1537 status = L2CAP_CS_AUTHEN_PEND;
1538 sk->sk_state = BT_CONNECT2;
1539 l2cap_pi(sk)->ident = cmd->ident;
/* If the link mode requires it, start encryption/authentication and
 * leave the channel pending; the auth/encrypt callbacks finish it. */
1541 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1542 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1543 if (!hci_conn_encrypt(conn->hcon))
1544 goto done;
1545 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1546 if (!hci_conn_auth(conn->hcon))
1547 goto done;
/* No security needed: go straight to configuration. */
1550 sk->sk_state = BT_CONFIG;
1551 result = status = 0;
1553 done:
1554 write_unlock_bh(&list->lock);
1556 response:
1557 bh_unlock_sock(parent);
1559 sendresp:
1560 rsp.scid = cpu_to_le16(scid);
1561 rsp.dcid = cpu_to_le16(dcid);
1562 rsp.result = cpu_to_le16(result);
1563 rsp.status = cpu_to_le16(status);
1564 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1565 return 0;
/* Handle an incoming Connect Response.  Look the channel up by source
 * CID (or by command ident while the peer has not assigned one yet);
 * on success move to BT_CONFIG and fire our Configure Request, on a
 * final error tear the channel down.
 * NOTE(review): both lookup helpers appear to return with the socket
 * bh-locked — the bh_unlock_sock() at the end pairs with it; confirm. */
1568 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1570 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1571 u16 scid, dcid, result, status;
1572 struct sock *sk;
1573 u8 req[128];
1575 scid = __le16_to_cpu(rsp->scid);
1576 dcid = __le16_to_cpu(rsp->dcid);
1577 result = __le16_to_cpu(rsp->result);
1578 status = __le16_to_cpu(rsp->status);
1580 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1582 if (scid) {
1583 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1584 return 0;
1585 } else {
1586 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1587 return 0;
1590 switch (result) {
1591 case L2CAP_CR_SUCCESS:
1592 sk->sk_state = BT_CONFIG;
1593 l2cap_pi(sk)->ident = 0;
1594 l2cap_pi(sk)->dcid = dcid;
1595 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1597 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1598 l2cap_build_conf_req(sk, req), req);
1599 break;
/* Pending: keep waiting for the final response. */
1601 case L2CAP_CR_PEND:
1602 break;
1604 default:
1605 l2cap_chan_del(sk, ECONNREFUSED);
1606 break;
1609 bh_unlock_sock(sk);
1610 return 0;
/* Handle an incoming Configure Request.  Options may arrive split over
 * several requests (continuation flag 0x0001 set); fragments accumulate
 * in pi->conf_req until the final one, which is parsed and answered.
 * When both directions are configured the channel becomes connected. */
1613 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1615 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1616 u16 dcid, flags;
1617 u8 rsp[64];
1618 struct sock *sk;
1619 int len;
1621 dcid = __le16_to_cpu(req->dcid);
1622 flags = __le16_to_cpu(req->flags);
1624 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1626 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1627 return -ENOENT;
1629 if (sk->sk_state == BT_DISCONN)
1630 goto unlock;
1632 /* Reject if config buffer is too small. */
1633 len = cmd_len - sizeof(*req);
1634 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1635 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1636 l2cap_build_conf_rsp(sk, rsp,
1637 L2CAP_CONF_REJECT, flags), rsp);
1638 goto unlock;
1641 /* Store config. */
1642 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1643 l2cap_pi(sk)->conf_len += len;
1645 if (flags & 0x0001) {
1646 /* Incomplete config. Send empty response. */
1647 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1648 l2cap_build_conf_rsp(sk, rsp,
1649 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1650 goto unlock;
1653 /* Complete config. */
1654 len = l2cap_parse_conf_req(sk, rsp);
1655 if (len < 0)
1656 goto unlock;
1658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1660 /* Reset config buffer. */
1661 l2cap_pi(sk)->conf_len = 0;
1663 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1664 goto unlock;
/* Both directions configured: the channel is ready for data. */
1666 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1667 sk->sk_state = BT_CONNECTED;
1668 l2cap_chan_ready(sk);
1669 goto unlock;
/* We have accepted theirs but not yet sent ours: send it now. */
1672 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1673 u8 req[64];
1674 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1675 l2cap_build_conf_req(sk, req), req);
1678 unlock:
1679 bh_unlock_sock(sk);
1680 return 0;
/* Handle an incoming Configure Response.  Unaccepted parameters are
 * retried up to L2CAP_CONF_MAX_RETRIES by resending our request; once
 * retries are exhausted the UNACCEPT case deliberately falls through
 * to the default branch, which disconnects the channel. */
1683 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1685 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1686 u16 scid, flags, result;
1687 struct sock *sk;
1689 scid = __le16_to_cpu(rsp->scid);
1690 flags = __le16_to_cpu(rsp->flags);
1691 result = __le16_to_cpu(rsp->result);
1693 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1695 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1696 return 0;
1698 switch (result) {
1699 case L2CAP_CONF_SUCCESS:
1700 break;
1702 case L2CAP_CONF_UNACCEPT:
1703 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1704 char req[128];
1705 /* It does not make sense to adjust L2CAP parameters
1706 * that are currently defined in the spec. We simply
1707 * resend config request that we sent earlier. It is
1708 * stupid, but it helps qualification testing which
1709 * expects at least some response from us. */
1710 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1711 l2cap_build_conf_req(sk, req), req);
1712 goto done;
/* fallthrough: retries exhausted — treat as fatal */
1715 default:
1716 sk->sk_state = BT_DISCONN;
1717 sk->sk_err = ECONNRESET;
1718 l2cap_sock_set_timer(sk, HZ * 5);
1720 struct l2cap_disconn_req req;
1721 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1722 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1723 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1724 L2CAP_DISCONN_REQ, sizeof(req), &req);
1726 goto done;
/* Continuation flag set: more response fragments to come. */
1729 if (flags & 0x01)
1730 goto done;
1732 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1734 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1735 sk->sk_state = BT_CONNECTED;
1736 l2cap_chan_ready(sk);
1739 done:
1740 bh_unlock_sock(sk);
1741 return 0;
1744 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1746 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1747 struct l2cap_disconn_rsp rsp;
1748 u16 dcid, scid;
1749 struct sock *sk;
1751 scid = __le16_to_cpu(req->scid);
1752 dcid = __le16_to_cpu(req->dcid);
1754 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1756 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1757 return 0;
1759 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1760 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1761 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1763 sk->sk_shutdown = SHUTDOWN_MASK;
1765 l2cap_chan_del(sk, ECONNRESET);
1766 bh_unlock_sock(sk);
1768 l2cap_sock_kill(sk);
1769 return 0;
1772 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1774 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1775 u16 dcid, scid;
1776 struct sock *sk;
1778 scid = __le16_to_cpu(rsp->scid);
1779 dcid = __le16_to_cpu(rsp->dcid);
1781 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1783 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1784 return 0;
1786 l2cap_chan_del(sk, 0);
1787 bh_unlock_sock(sk);
1789 l2cap_sock_kill(sk);
1790 return 0;
1793 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1795 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1796 u16 type;
1798 type = __le16_to_cpu(req->type);
1800 BT_DBG("type 0x%4.4x", type);
1802 if (type == L2CAP_IT_FEAT_MASK) {
1803 u8 buf[8];
1804 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1805 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1806 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1807 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1808 l2cap_send_cmd(conn, cmd->ident,
1809 L2CAP_INFO_RSP, sizeof(buf), buf);
1810 } else {
1811 struct l2cap_info_rsp rsp;
1812 rsp.type = cpu_to_le16(type);
1813 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1814 l2cap_send_cmd(conn, cmd->ident,
1815 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1818 return 0;
1821 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1823 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1824 u16 type, result;
1826 type = __le16_to_cpu(rsp->type);
1827 result = __le16_to_cpu(rsp->result);
1829 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1831 conn->info_ident = 0;
1833 del_timer(&conn->info_timer);
1835 if (type == L2CAP_IT_FEAT_MASK)
1836 conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
1838 l2cap_conn_start(conn);
1840 return 0;
/* Process all signaling commands packed into one frame on CID 0x0001.
 * Each command is dispatched to its handler; any handler error is
 * answered with a Command Reject.  Consumes the skb. */
1843 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1845 u8 *data = skb->data;
1846 int len = skb->len;
1847 struct l2cap_cmd_hdr cmd;
1848 int err = 0;
/* Give raw sockets a copy of the signaling traffic first. */
1850 l2cap_raw_recv(conn, skb);
1852 while (len >= L2CAP_CMD_HDR_SIZE) {
1853 u16 cmd_len;
1854 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1855 data += L2CAP_CMD_HDR_SIZE;
1856 len -= L2CAP_CMD_HDR_SIZE;
1858 cmd_len = le16_to_cpu(cmd.len);
1860 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A length overrunning the frame, or ident 0, means garbage: stop. */
1862 if (cmd_len > len || !cmd.ident) {
1863 BT_DBG("corrupted command");
1864 break;
1867 switch (cmd.code) {
1868 case L2CAP_COMMAND_REJ:
1869 l2cap_command_rej(conn, &cmd, data);
1870 break;
1872 case L2CAP_CONN_REQ:
1873 err = l2cap_connect_req(conn, &cmd, data);
1874 break;
1876 case L2CAP_CONN_RSP:
1877 err = l2cap_connect_rsp(conn, &cmd, data);
1878 break;
1880 case L2CAP_CONF_REQ:
1881 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1882 break;
1884 case L2CAP_CONF_RSP:
1885 err = l2cap_config_rsp(conn, &cmd, data);
1886 break;
1888 case L2CAP_DISCONN_REQ:
1889 err = l2cap_disconnect_req(conn, &cmd, data);
1890 break;
1892 case L2CAP_DISCONN_RSP:
1893 err = l2cap_disconnect_rsp(conn, &cmd, data);
1894 break;
/* Echo request is answered by mirroring the payload back. */
1896 case L2CAP_ECHO_REQ:
1897 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1898 break;
1900 case L2CAP_ECHO_RSP:
1901 break;
1903 case L2CAP_INFO_REQ:
1904 err = l2cap_information_req(conn, &cmd, data);
1905 break;
1907 case L2CAP_INFO_RSP:
1908 err = l2cap_information_rsp(conn, &cmd, data);
1909 break;
1911 default:
1912 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1913 err = -EINVAL;
1914 break;
1917 if (err) {
1918 struct l2cap_cmd_rej rej;
1919 BT_DBG("error %d", err);
1921 /* FIXME: Map err to a valid reason */
1922 rej.reason = cpu_to_le16(0);
1923 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Advance to the next command in this frame. */
1926 data += cmd_len;
1927 len -= cmd_len;
1930 kfree_skb(skb);
1933 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1935 struct sock *sk;
1937 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1938 if (!sk) {
1939 BT_DBG("unknown cid 0x%4.4x", cid);
1940 goto drop;
1943 BT_DBG("sk %p, len %d", sk, skb->len);
1945 if (sk->sk_state != BT_CONNECTED)
1946 goto drop;
1948 if (l2cap_pi(sk)->imtu < skb->len)
1949 goto drop;
1951 /* If socket recv buffers overflows we drop data here
1952 * which is *bad* because L2CAP has to be reliable.
1953 * But we don't have any other choice. L2CAP doesn't
1954 * provide flow control mechanism. */
1956 if (!sock_queue_rcv_skb(sk, skb))
1957 goto done;
1959 drop:
1960 kfree_skb(skb);
1962 done:
1963 if (sk)
1964 bh_unlock_sock(sk);
1966 return 0;
1969 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1971 struct sock *sk;
1973 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1974 if (!sk)
1975 goto drop;
1977 BT_DBG("sk %p, len %d", sk, skb->len);
1979 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1980 goto drop;
1982 if (l2cap_pi(sk)->imtu < skb->len)
1983 goto drop;
1985 if (!sock_queue_rcv_skb(sk, skb))
1986 goto done;
1988 drop:
1989 kfree_skb(skb);
1991 done:
1992 if (sk) bh_unlock_sock(sk);
1993 return 0;
1996 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1998 struct l2cap_hdr *lh = (void *) skb->data;
1999 u16 cid, len;
2000 __le16 psm;
2002 skb_pull(skb, L2CAP_HDR_SIZE);
2003 cid = __le16_to_cpu(lh->cid);
2004 len = __le16_to_cpu(lh->len);
2006 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2008 switch (cid) {
2009 case 0x0001:
2010 l2cap_sig_channel(conn, skb);
2011 break;
2013 case 0x0002:
2014 psm = get_unaligned((__le16 *) skb->data);
2015 skb_pull(skb, 2);
2016 l2cap_conless_channel(conn, psm, skb);
2017 break;
2019 default:
2020 l2cap_data_channel(conn, cid, skb);
2021 break;
2025 /* ---- L2CAP interface with lower layer (HCI) ---- */
2027 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2029 int exact = 0, lm1 = 0, lm2 = 0;
2030 register struct sock *sk;
2031 struct hlist_node *node;
2033 if (type != ACL_LINK)
2034 return 0;
2036 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2038 /* Find listening sockets and check their link_mode */
2039 read_lock(&l2cap_sk_list.lock);
2040 sk_for_each(sk, node, &l2cap_sk_list.head) {
2041 if (sk->sk_state != BT_LISTEN)
2042 continue;
2044 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2045 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2046 exact++;
2047 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2048 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2050 read_unlock(&l2cap_sk_list.lock);
2052 return exact ? lm1 : lm2;
2055 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2057 struct l2cap_conn *conn;
2059 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2061 if (hcon->type != ACL_LINK)
2062 return 0;
2064 if (!status) {
2065 conn = l2cap_conn_add(hcon, status);
2066 if (conn)
2067 l2cap_conn_ready(conn);
2068 } else
2069 l2cap_conn_del(hcon, bt_err(status));
2071 return 0;
2074 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2076 BT_DBG("hcon %p reason %d", hcon, reason);
2078 if (hcon->type != ACL_LINK)
2079 return 0;
2081 l2cap_conn_del(hcon, bt_err(reason));
2083 return 0;
2086 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2088 struct l2cap_chan_list *l;
2089 struct l2cap_conn *conn = conn = hcon->l2cap_data;
2090 struct l2cap_conn_rsp rsp;
2091 struct sock *sk;
2092 int result;
2094 if (!conn)
2095 return 0;
2097 l = &conn->chan_list;
2099 BT_DBG("conn %p", conn);
2101 read_lock(&l->lock);
2103 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2104 bh_lock_sock(sk);
2106 if (sk->sk_state != BT_CONNECT2 ||
2107 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2108 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2109 bh_unlock_sock(sk);
2110 continue;
2113 if (!status) {
2114 sk->sk_state = BT_CONFIG;
2115 result = 0;
2116 } else {
2117 sk->sk_state = BT_DISCONN;
2118 l2cap_sock_set_timer(sk, HZ/10);
2119 result = L2CAP_CR_SEC_BLOCK;
2122 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2123 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2124 rsp.result = cpu_to_le16(result);
2125 rsp.status = cpu_to_le16(0);
2126 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2127 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2129 bh_unlock_sock(sk);
2132 read_unlock(&l->lock);
2133 return 0;
/* HCI encryption-complete callback.  Finish every channel on this
 * connection waiting in BT_CONNECT2: on success move to BT_CONFIG, on
 * failure mark disconnecting as security-blocked, and send the pending
 * Connect Response.  L2CAP_LM_SECURE channels additionally request a
 * link key change. */
2136 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2138 struct l2cap_chan_list *l;
2139 struct l2cap_conn *conn = hcon->l2cap_data;
2140 struct l2cap_conn_rsp rsp;
2141 struct sock *sk;
2142 int result;
2144 if (!conn)
2145 return 0;
2147 l = &conn->chan_list;
2149 BT_DBG("conn %p", conn);
2151 read_lock(&l->lock);
2153 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2154 bh_lock_sock(sk);
/* Only channels stalled waiting for security are of interest. */
2156 if (sk->sk_state != BT_CONNECT2) {
2157 bh_unlock_sock(sk);
2158 continue;
2161 if (!status) {
2162 sk->sk_state = BT_CONFIG;
2163 result = 0;
2164 } else {
2165 sk->sk_state = BT_DISCONN;
2166 l2cap_sock_set_timer(sk, HZ/10);
2167 result = L2CAP_CR_SEC_BLOCK;
/* Answer the Connect Request that was left pending. */
2170 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2171 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2172 rsp.result = cpu_to_le16(result);
2173 rsp.status = cpu_to_le16(0);
2174 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2175 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2177 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2178 hci_conn_change_link_key(hcon);
2180 bh_unlock_sock(sk);
2183 read_unlock(&l->lock);
2184 return 0;
/* Receive ACL data from HCI.  A complete L2CAP frame in a single
 * ACL_START packet is delivered directly; otherwise fragments are
 * reassembled in conn->rx_skb / conn->rx_len until the full frame
 * (L2CAP header length + header size) has arrived.  Malformed
 * sequences mark the connection unreliable (ECOMM) and reset the
 * reassembly state. */
2187 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2189 struct l2cap_conn *conn = hcon->l2cap_data;
2191 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2192 goto drop;
2194 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2196 if (flags & ACL_START) {
2197 struct l2cap_hdr *hdr;
2198 int len;
/* A start frame while reassembly is pending aborts the old frame. */
2200 if (conn->rx_len) {
2201 BT_ERR("Unexpected start frame (len %d)", skb->len);
2202 kfree_skb(conn->rx_skb);
2203 conn->rx_skb = NULL;
2204 conn->rx_len = 0;
2205 l2cap_conn_unreliable(conn, ECOMM);
/* Need at least the 16-bit length field to size the frame. */
2208 if (skb->len < 2) {
2209 BT_ERR("Frame is too short (len %d)", skb->len);
2210 l2cap_conn_unreliable(conn, ECOMM);
2211 goto drop;
2214 hdr = (struct l2cap_hdr *) skb->data;
2215 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2217 if (len == skb->len) {
2218 /* Complete frame received */
2219 l2cap_recv_frame(conn, skb);
2220 return 0;
2223 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2225 if (skb->len > len) {
2226 BT_ERR("Frame is too long (len %d, expected len %d)",
2227 skb->len, len);
2228 l2cap_conn_unreliable(conn, ECOMM);
2229 goto drop;
2232 /* Allocate skb for the complete frame (with header) */
2233 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2234 goto drop;
2236 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2237 skb->len);
2238 conn->rx_len = len - skb->len;
2239 } else {
2240 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending frame is a protocol violation. */
2242 if (!conn->rx_len) {
2243 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2244 l2cap_conn_unreliable(conn, ECOMM);
2245 goto drop;
2248 if (skb->len > conn->rx_len) {
2249 BT_ERR("Fragment is too long (len %d, expected %d)",
2250 skb->len, conn->rx_len);
2251 kfree_skb(conn->rx_skb);
2252 conn->rx_skb = NULL;
2253 conn->rx_len = 0;
2254 l2cap_conn_unreliable(conn, ECOMM);
2255 goto drop;
2258 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2259 skb->len);
2260 conn->rx_len -= skb->len;
2262 if (!conn->rx_len) {
2263 /* Complete frame received */
2264 l2cap_recv_frame(conn, conn->rx_skb);
2265 conn->rx_skb = NULL;
2269 drop:
2270 kfree_skb(skb);
2271 return 0;
/* sysfs "l2cap" class attribute: dump one line per L2CAP socket
 * (src/dst address, state, PSM, CIDs, MTUs, link mode).
 * NOTE(review): output is built with unbounded sprintf; this assumes
 * the whole listing fits in the sysfs buffer — confirm against the
 * class attribute contract. */
2274 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2276 struct sock *sk;
2277 struct hlist_node *node;
2278 char *str = buf;
2280 read_lock_bh(&l2cap_sk_list.lock);
2282 sk_for_each(sk, node, &l2cap_sk_list.head) {
2283 struct l2cap_pinfo *pi = l2cap_pi(sk);
2285 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2286 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2287 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2288 pi->imtu, pi->omtu, pi->link_mode);
2291 read_unlock_bh(&l2cap_sk_list.lock);
2293 return (str - buf);
/* Read-only class attribute exposing the socket listing above. */
2296 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-level operations for L2CAP sockets.  recvmsg and poll reuse
 * the generic Bluetooth helpers; mmap, socketpair and ioctl are
 * explicitly unsupported (sock_no_*). */
2298 static const struct proto_ops l2cap_sock_ops = {
2299 .family = PF_BLUETOOTH,
2300 .owner = THIS_MODULE,
2301 .release = l2cap_sock_release,
2302 .bind = l2cap_sock_bind,
2303 .connect = l2cap_sock_connect,
2304 .listen = l2cap_sock_listen,
2305 .accept = l2cap_sock_accept,
2306 .getname = l2cap_sock_getname,
2307 .sendmsg = l2cap_sock_sendmsg,
2308 .recvmsg = bt_sock_recvmsg,
2309 .poll = bt_sock_poll,
2310 .mmap = sock_no_mmap,
2311 .socketpair = sock_no_socketpair,
2312 .ioctl = sock_no_ioctl,
2313 .shutdown = l2cap_sock_shutdown,
2314 .setsockopt = l2cap_sock_setsockopt,
2315 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook: creates L2CAP sockets for PF_BLUETOOTH. */
2318 static struct net_proto_family l2cap_sock_family_ops = {
2319 .family = PF_BLUETOOTH,
2320 .owner = THIS_MODULE,
2321 .create = l2cap_sock_create,
/* Registration record hooking L2CAP into the HCI core: connection
 * indications/confirmations, auth/encrypt results and inbound ACL data
 * are routed to the handlers defined above. */
2324 static struct hci_proto l2cap_hci_proto = {
2325 .name = "L2CAP",
2326 .id = HCI_PROTO_L2CAP,
2327 .connect_ind = l2cap_connect_ind,
2328 .connect_cfm = l2cap_connect_cfm,
2329 .disconn_ind = l2cap_disconn_ind,
2330 .auth_cfm = l2cap_auth_cfm,
2331 .encrypt_cfm = l2cap_encrypt_cfm,
2332 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP proto, the Bluetooth socket family
 * and the HCI protocol handler, then (best-effort) the sysfs info
 * file.  Registrations are unwound in reverse order on failure; the
 * sysfs file is optional and only logged when it cannot be created. */
2335 static int __init l2cap_init(void)
2337 int err;
2339 err = proto_register(&l2cap_proto, 0);
2340 if (err < 0)
2341 return err;
2343 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2344 if (err < 0) {
2345 BT_ERR("L2CAP socket registration failed");
2346 goto error;
2349 err = hci_register_proto(&l2cap_hci_proto);
2350 if (err < 0) {
2351 BT_ERR("L2CAP protocol registration failed");
2352 bt_sock_unregister(BTPROTO_L2CAP);
2353 goto error;
2356 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2357 BT_ERR("Failed to create L2CAP info file");
2359 BT_INFO("L2CAP ver %s", VERSION);
2360 BT_INFO("L2CAP socket layer initialized");
2362 return 0;
2364 error:
2365 proto_unregister(&l2cap_proto);
2366 return err;
/* Module exit: tear down in reverse order of l2cap_init.  Unregister
 * failures can only be logged at this point. */
2369 static void __exit l2cap_exit(void)
2371 class_remove_file(bt_class, &class_attr_l2cap);
2373 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2374 BT_ERR("L2CAP socket unregistration failed");
2376 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2377 BT_ERR("L2CAP protocol unregistration failed");
2379 proto_unregister(&l2cap_proto);
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
/* Module entry points and metadata.  The "bt-proto-0" alias presumably
 * corresponds to BTPROTO_L2CAP for autoloading — confirm against the
 * bt_sock registration code. */
2391 module_init(l2cap_init);
2392 module_exit(l2cap_exit);
2394 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2395 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2396 MODULE_VERSION(VERSION);
2397 MODULE_LICENSE("GPL");
2398 MODULE_ALIAS("bt-proto-0");