bluetooth: delete timer in l2cap_conn_del()
net/bluetooth/l2cap.c (linux-2.6/mini2440.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.9"
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
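/* sk_timer callback (armed with l2cap_sock_set_timer below). It runs in
 * timer context, hence bh_lock_sock(); the channel is closed with
 * ETIMEDOUT and the reference held for the pending timer is dropped. */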
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
82 bh_lock_sock(sk);
83 __l2cap_sock_close(sk, ETIMEDOUT);
84 bh_unlock_sock(sk);
86 l2cap_sock_kill(sk);
87 sock_put(sk);
90 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
92 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
93 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
96 static void l2cap_sock_clear_timer(struct sock *sk)
98 BT_DBG("sock %p state %d", sk, sk->sk_state);
99 sk_stop_timer(sk, &sk->sk_timer);
102 /* ---- L2CAP channels ---- */
103 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
105 struct sock *s;
106 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
107 if (l2cap_pi(s)->dcid == cid)
108 break;
110 return s;
113 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->scid == cid)
118 break;
120 return s;
123 /* Find channel with given SCID.
124 * Returns locked socket */
125 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127 struct sock *s;
128 read_lock(&l->lock);
129 s = __l2cap_get_chan_by_scid(l, cid);
130 if (s) bh_lock_sock(s);
131 read_unlock(&l->lock);
132 return s;
135 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->ident == ident)
140 break;
142 return s;
145 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 struct sock *s;
148 read_lock(&l->lock);
149 s = __l2cap_get_chan_by_ident(l, ident);
150 if (s) bh_lock_sock(s);
151 read_unlock(&l->lock);
152 return s;
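/* Pick the first unused dynamically allocated CID. CIDs below 0x0040 are
 * reserved: 0x0001 is the signalling channel and 0x0002 the connectionless
 * channel (both assigned directly in __l2cap_chan_add() below). */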
155 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
157 u16 cid = 0x0040;
159 for (; cid < 0xffff; cid++) {
160 if (!__l2cap_get_chan_by_scid(l, cid))
161 return cid;
164 return 0;
167 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
169 sock_hold(sk);
171 if (l->head)
172 l2cap_pi(l->head)->prev_c = sk;
174 l2cap_pi(sk)->next_c = l->head;
175 l2cap_pi(sk)->prev_c = NULL;
176 l->head = sk;
179 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
181 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
183 write_lock_bh(&l->lock);
184 if (sk == l->head)
185 l->head = next;
187 if (next)
188 l2cap_pi(next)->prev_c = prev;
189 if (prev)
190 l2cap_pi(prev)->next_c = next;
191 write_unlock_bh(&l->lock);
193 __sock_put(sk);
196 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
198 struct l2cap_chan_list *l = &conn->chan_list;
200 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
202 l2cap_pi(sk)->conn = conn;
204 if (sk->sk_type == SOCK_SEQPACKET) {
205 /* Alloc CID for connection-oriented socket */
206 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
207 } else if (sk->sk_type == SOCK_DGRAM) {
208 /* Connectionless socket */
209 l2cap_pi(sk)->scid = 0x0002;
210 l2cap_pi(sk)->dcid = 0x0002;
211 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
212 } else {
213 /* Raw socket can send/recv signalling messages only */
214 l2cap_pi(sk)->scid = 0x0001;
215 l2cap_pi(sk)->dcid = 0x0001;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
219 __l2cap_chan_link(l, sk);
221 if (parent)
222 bt_accept_enqueue(parent, sk);
225 /* Delete channel.
226 * Must be called on the locked socket. */
227 static void l2cap_chan_del(struct sock *sk, int err)
229 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
230 struct sock *parent = bt_sk(sk)->parent;
232 l2cap_sock_clear_timer(sk);
234 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
236 if (conn) {
237 /* Unlink from channel list */
238 l2cap_chan_unlink(&conn->chan_list, sk);
239 l2cap_pi(sk)->conn = NULL;
240 hci_conn_put(conn->hcon);
243 sk->sk_state = BT_CLOSED;
244 sock_set_flag(sk, SOCK_ZAPPED);
246 if (err)
247 sk->sk_err = err;
249 if (parent) {
250 bt_accept_unlink(sk);
251 parent->sk_data_ready(parent, 0);
252 } else
253 sk->sk_state_change(sk);
256 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
258 u8 id;
260 /* Get next available identifier.
261 * 1 - 128 are used by the kernel.
262 * 129 - 199 are reserved.
263 * 200 - 254 are used by utilities like l2ping, etc.
266 spin_lock_bh(&conn->lock);
268 if (++conn->tx_ident > 128)
269 conn->tx_ident = 1;
271 id = conn->tx_ident;
273 spin_unlock_bh(&conn->lock);
275 return id;
278 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
280 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
282 BT_DBG("code 0x%2.2x", code);
284 if (!skb)
285 return -ENOMEM;
287 return hci_send_acl(conn->hcon, skb, 0);
290 /* ---- L2CAP connections ---- */
291 static void l2cap_conn_start(struct l2cap_conn *conn)
293 struct l2cap_chan_list *l = &conn->chan_list;
294 struct sock *sk;
296 BT_DBG("conn %p", conn);
298 read_lock(&l->lock);
300 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
301 bh_lock_sock(sk);
303 if (sk->sk_type != SOCK_SEQPACKET) {
304 l2cap_sock_clear_timer(sk);
305 sk->sk_state = BT_CONNECTED;
306 sk->sk_state_change(sk);
307 } else if (sk->sk_state == BT_CONNECT) {
308 struct l2cap_conn_req req;
309 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
310 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
311 req.psm = l2cap_pi(sk)->psm;
312 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
313 L2CAP_CONN_REQ, sizeof(req), &req);
316 bh_unlock_sock(sk);
319 read_unlock(&l->lock);
322 static void l2cap_conn_ready(struct l2cap_conn *conn)
324 BT_DBG("conn %p", conn);
326 if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
327 struct l2cap_info_req req;
329 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
331 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
332 conn->info_ident = l2cap_get_ident(conn);
334 mod_timer(&conn->info_timer,
335 jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
337 l2cap_send_cmd(conn, conn->info_ident,
338 L2CAP_INFO_REQ, sizeof(req), &req);
342 /* Notify sockets that we cannot guarantee reliability anymore */
343 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
345 struct l2cap_chan_list *l = &conn->chan_list;
346 struct sock *sk;
348 BT_DBG("conn %p", conn);
350 read_lock(&l->lock);
352 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
353 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
354 sk->sk_err = err;
357 read_unlock(&l->lock);
360 static void l2cap_info_timeout(unsigned long arg)
362 struct l2cap_conn *conn = (void *) arg;
364 conn->info_ident = 0;
366 l2cap_conn_start(conn);
369 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
371 struct l2cap_conn *conn = hcon->l2cap_data;
373 if (conn || status)
374 return conn;
376 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
377 if (!conn)
378 return NULL;
380 hcon->l2cap_data = conn;
381 conn->hcon = hcon;
383 BT_DBG("hcon %p conn %p", hcon, conn);
385 conn->mtu = hcon->hdev->acl_mtu;
386 conn->src = &hcon->hdev->bdaddr;
387 conn->dst = &hcon->dst;
389 conn->feat_mask = 0;
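/* The information request timer is armed in l2cap_conn_ready() and must be
 * deleted again in l2cap_conn_del() before the connection is freed. */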
391 setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);
393 spin_lock_init(&conn->lock);
394 rwlock_init(&conn->chan_list.lock);
396 return conn;
399 static void l2cap_conn_del(struct hci_conn *hcon, int err)
401 struct l2cap_conn *conn = hcon->l2cap_data;
402 struct sock *sk;
404 if (!conn)
405 return;
407 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
409 if (conn->rx_skb)
410 kfree_skb(conn->rx_skb);
412 /* Kill channels */
413 while ((sk = conn->chan_list.head)) {
414 bh_lock_sock(sk);
415 l2cap_chan_del(sk, err);
416 bh_unlock_sock(sk);
417 l2cap_sock_kill(sk);
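/* Stop the info_timer before freeing the connection so that a pending
 * timeout cannot fire on freed memory. */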
420 del_timer_sync(&conn->info_timer);
422 hcon->l2cap_data = NULL;
423 kfree(conn);
426 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
428 struct l2cap_chan_list *l = &conn->chan_list;
429 write_lock_bh(&l->lock);
430 __l2cap_chan_add(conn, sk, parent);
431 write_unlock_bh(&l->lock);
434 /* ---- Socket interface ---- */
435 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
437 struct sock *sk;
438 struct hlist_node *node;
439 sk_for_each(sk, node, &l2cap_sk_list.head)
440 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
441 goto found;
442 sk = NULL;
443 found:
444 return sk;
447 /* Find socket with psm and source bdaddr.
448 * Returns closest match.
450 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
452 struct sock *sk = NULL, *sk1 = NULL;
453 struct hlist_node *node;
455 sk_for_each(sk, node, &l2cap_sk_list.head) {
456 if (state && sk->sk_state != state)
457 continue;
459 if (l2cap_pi(sk)->psm == psm) {
460 /* Exact match. */
461 if (!bacmp(&bt_sk(sk)->src, src))
462 break;
464 /* Closest match */
465 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
466 sk1 = sk;
469 return node ? sk : sk1;
472 /* Find socket with given address (psm, src).
473 * Returns locked socket */
474 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
476 struct sock *s;
477 read_lock(&l2cap_sk_list.lock);
478 s = __l2cap_get_sock_by_psm(state, psm, src);
479 if (s) bh_lock_sock(s);
480 read_unlock(&l2cap_sk_list.lock);
481 return s;
484 static void l2cap_sock_destruct(struct sock *sk)
486 BT_DBG("sk %p", sk);
488 skb_queue_purge(&sk->sk_receive_queue);
489 skb_queue_purge(&sk->sk_write_queue);
492 static void l2cap_sock_cleanup_listen(struct sock *parent)
494 struct sock *sk;
496 BT_DBG("parent %p", parent);
498 /* Close not yet accepted channels */
499 while ((sk = bt_accept_dequeue(parent, NULL)))
500 l2cap_sock_close(sk);
502 parent->sk_state = BT_CLOSED;
503 sock_set_flag(parent, SOCK_ZAPPED);
506 /* Kill socket (only if zapped and orphan)
507 * Must be called on unlocked socket.
509 static void l2cap_sock_kill(struct sock *sk)
511 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
512 return;
514 BT_DBG("sk %p state %d", sk, sk->sk_state);
516 /* Kill poor orphan */
517 bt_sock_unlink(&l2cap_sk_list, sk);
518 sock_set_flag(sk, SOCK_DEAD);
519 sock_put(sk);
522 static void __l2cap_sock_close(struct sock *sk, int reason)
524 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
526 switch (sk->sk_state) {
527 case BT_LISTEN:
528 l2cap_sock_cleanup_listen(sk);
529 break;
531 case BT_CONNECTED:
532 case BT_CONFIG:
533 case BT_CONNECT2:
534 if (sk->sk_type == SOCK_SEQPACKET) {
535 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
536 struct l2cap_disconn_req req;
538 sk->sk_state = BT_DISCONN;
539 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
541 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
542 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
543 l2cap_send_cmd(conn, l2cap_get_ident(conn),
544 L2CAP_DISCONN_REQ, sizeof(req), &req);
545 } else {
546 l2cap_chan_del(sk, reason);
548 break;
550 case BT_CONNECT:
551 case BT_DISCONN:
552 l2cap_chan_del(sk, reason);
553 break;
555 default:
556 sock_set_flag(sk, SOCK_ZAPPED);
557 break;
561 /* Must be called on unlocked socket. */
562 static void l2cap_sock_close(struct sock *sk)
564 l2cap_sock_clear_timer(sk);
565 lock_sock(sk);
566 __l2cap_sock_close(sk, ECONNRESET);
567 release_sock(sk);
568 l2cap_sock_kill(sk);
571 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
573 struct l2cap_pinfo *pi = l2cap_pi(sk);
575 BT_DBG("sk %p", sk);
577 if (parent) {
578 sk->sk_type = parent->sk_type;
579 pi->imtu = l2cap_pi(parent)->imtu;
580 pi->omtu = l2cap_pi(parent)->omtu;
581 pi->link_mode = l2cap_pi(parent)->link_mode;
582 } else {
583 pi->imtu = L2CAP_DEFAULT_MTU;
584 pi->omtu = 0;
585 pi->link_mode = 0;
588 /* Default config options */
589 pi->conf_len = 0;
590 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
593 static struct proto l2cap_proto = {
594 .name = "L2CAP",
595 .owner = THIS_MODULE,
596 .obj_size = sizeof(struct l2cap_pinfo)
599 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
601 struct sock *sk;
603 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
604 if (!sk)
605 return NULL;
607 sock_init_data(sock, sk);
608 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
610 sk->sk_destruct = l2cap_sock_destruct;
611 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
613 sock_reset_flag(sk, SOCK_ZAPPED);
615 sk->sk_protocol = proto;
616 sk->sk_state = BT_OPEN;
618 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);
620 bt_sock_link(&l2cap_sk_list, sk);
621 return sk;
624 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
626 struct sock *sk;
628 BT_DBG("sock %p", sock);
630 sock->state = SS_UNCONNECTED;
632 if (sock->type != SOCK_SEQPACKET &&
633 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
634 return -ESOCKTNOSUPPORT;
636 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
637 return -EPERM;
639 sock->ops = &l2cap_sock_ops;
641 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
642 if (!sk)
643 return -ENOMEM;
645 l2cap_sock_init(sk, NULL);
646 return 0;
649 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
651 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
652 struct sock *sk = sock->sk;
653 int err = 0;
655 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
657 if (!addr || addr->sa_family != AF_BLUETOOTH)
658 return -EINVAL;
660 lock_sock(sk);
662 if (sk->sk_state != BT_OPEN) {
663 err = -EBADFD;
664 goto done;
667 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
668 !capable(CAP_NET_BIND_SERVICE)) {
669 err = -EACCES;
670 goto done;
673 write_lock_bh(&l2cap_sk_list.lock);
675 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
676 err = -EADDRINUSE;
677 } else {
678 /* Save source address */
679 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
680 l2cap_pi(sk)->psm = la->l2_psm;
681 l2cap_pi(sk)->sport = la->l2_psm;
682 sk->sk_state = BT_BOUND;
685 write_unlock_bh(&l2cap_sk_list.lock);
687 done:
688 release_sock(sk);
689 return err;
692 static int l2cap_do_connect(struct sock *sk)
694 bdaddr_t *src = &bt_sk(sk)->src;
695 bdaddr_t *dst = &bt_sk(sk)->dst;
696 struct l2cap_conn *conn;
697 struct hci_conn *hcon;
698 struct hci_dev *hdev;
699 int err = 0;
701 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
703 if (!(hdev = hci_get_route(dst, src)))
704 return -EHOSTUNREACH;
706 hci_dev_lock_bh(hdev);
708 err = -ENOMEM;
710 hcon = hci_connect(hdev, ACL_LINK, dst);
711 if (!hcon)
712 goto done;
714 conn = l2cap_conn_add(hcon, 0);
715 if (!conn) {
716 hci_conn_put(hcon);
717 goto done;
720 err = 0;
722 /* Update source addr of the socket */
723 bacpy(src, conn->src);
725 l2cap_chan_add(conn, sk, NULL);
727 sk->sk_state = BT_CONNECT;
728 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
730 if (hcon->state == BT_CONNECTED) {
731 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
732 l2cap_conn_ready(conn);
733 goto done;
736 if (sk->sk_type == SOCK_SEQPACKET) {
737 struct l2cap_conn_req req;
738 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
739 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
740 req.psm = l2cap_pi(sk)->psm;
741 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
742 L2CAP_CONN_REQ, sizeof(req), &req);
743 } else {
744 l2cap_sock_clear_timer(sk);
745 sk->sk_state = BT_CONNECTED;
749 done:
750 hci_dev_unlock_bh(hdev);
751 hci_dev_put(hdev);
752 return err;
755 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
757 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
758 struct sock *sk = sock->sk;
759 int err = 0;
761 lock_sock(sk);
763 BT_DBG("sk %p", sk);
765 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
766 err = -EINVAL;
767 goto done;
770 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
771 err = -EINVAL;
772 goto done;
775 switch(sk->sk_state) {
776 case BT_CONNECT:
777 case BT_CONNECT2:
778 case BT_CONFIG:
779 /* Already connecting */
780 goto wait;
782 case BT_CONNECTED:
783 /* Already connected */
784 goto done;
786 case BT_OPEN:
787 case BT_BOUND:
788 /* Can connect */
789 break;
791 default:
792 err = -EBADFD;
793 goto done;
796 /* Set destination address and psm */
797 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
798 l2cap_pi(sk)->psm = la->l2_psm;
800 if ((err = l2cap_do_connect(sk)))
801 goto done;
803 wait:
804 err = bt_sock_wait_state(sk, BT_CONNECTED,
805 sock_sndtimeo(sk, flags & O_NONBLOCK));
806 done:
807 release_sock(sk);
808 return err;
811 static int l2cap_sock_listen(struct socket *sock, int backlog)
813 struct sock *sk = sock->sk;
814 int err = 0;
816 BT_DBG("sk %p backlog %d", sk, backlog);
818 lock_sock(sk);
820 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
821 err = -EBADFD;
822 goto done;
825 if (!l2cap_pi(sk)->psm) {
826 bdaddr_t *src = &bt_sk(sk)->src;
827 u16 psm;
829 err = -EINVAL;
831 write_lock_bh(&l2cap_sk_list.lock);
833 for (psm = 0x1001; psm < 0x1100; psm += 2)
834 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
835 l2cap_pi(sk)->psm = htobs(psm);
836 l2cap_pi(sk)->sport = htobs(psm);
837 err = 0;
838 break;
841 write_unlock_bh(&l2cap_sk_list.lock);
843 if (err < 0)
844 goto done;
847 sk->sk_max_ack_backlog = backlog;
848 sk->sk_ack_backlog = 0;
849 sk->sk_state = BT_LISTEN;
851 done:
852 release_sock(sk);
853 return err;
856 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
858 DECLARE_WAITQUEUE(wait, current);
859 struct sock *sk = sock->sk, *nsk;
860 long timeo;
861 int err = 0;
863 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
865 if (sk->sk_state != BT_LISTEN) {
866 err = -EBADFD;
867 goto done;
870 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
872 BT_DBG("sk %p timeo %ld", sk, timeo);
874 /* Wait for an incoming connection. (wake-one). */
875 add_wait_queue_exclusive(sk->sk_sleep, &wait);
876 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
877 set_current_state(TASK_INTERRUPTIBLE);
878 if (!timeo) {
879 err = -EAGAIN;
880 break;
883 release_sock(sk);
884 timeo = schedule_timeout(timeo);
885 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
887 if (sk->sk_state != BT_LISTEN) {
888 err = -EBADFD;
889 break;
892 if (signal_pending(current)) {
893 err = sock_intr_errno(timeo);
894 break;
897 set_current_state(TASK_RUNNING);
898 remove_wait_queue(sk->sk_sleep, &wait);
900 if (err)
901 goto done;
903 newsock->state = SS_CONNECTED;
905 BT_DBG("new socket %p", nsk);
907 done:
908 release_sock(sk);
909 return err;
912 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
914 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
915 struct sock *sk = sock->sk;
917 BT_DBG("sock %p, sk %p", sock, sk);
919 addr->sa_family = AF_BLUETOOTH;
920 *len = sizeof(struct sockaddr_l2);
922 if (peer)
923 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
924 else
925 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
927 la->l2_psm = l2cap_pi(sk)->psm;
928 return 0;
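/* Build one outgoing L2CAP frame: the first skb carries the L2CAP header
 * (plus the 2-byte PSM for SOCK_DGRAM); anything beyond conn->mtu is chained
 * as continuation fragments on frag_list and handed to hci_send_acl(). */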
931 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
933 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
934 struct sk_buff *skb, **frag;
935 int err, hlen, count, sent=0;
936 struct l2cap_hdr *lh;
938 BT_DBG("sk %p len %d", sk, len);
940 /* First fragment (with L2CAP header) */
941 if (sk->sk_type == SOCK_DGRAM)
942 hlen = L2CAP_HDR_SIZE + 2;
943 else
944 hlen = L2CAP_HDR_SIZE;
946 count = min_t(unsigned int, (conn->mtu - hlen), len);
948 skb = bt_skb_send_alloc(sk, hlen + count,
949 msg->msg_flags & MSG_DONTWAIT, &err);
950 if (!skb)
951 return err;
953 /* Create L2CAP header */
954 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
955 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
956 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
958 if (sk->sk_type == SOCK_DGRAM)
959 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
961 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
962 err = -EFAULT;
963 goto fail;
966 sent += count;
967 len -= count;
969 /* Continuation fragments (no L2CAP header) */
970 frag = &skb_shinfo(skb)->frag_list;
971 while (len) {
972 count = min_t(unsigned int, conn->mtu, len);
974 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
975 if (!*frag)
976 goto fail;
978 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
979 err = -EFAULT;
980 goto fail;
983 sent += count;
984 len -= count;
986 frag = &(*frag)->next;
989 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
990 goto fail;
992 return sent;
994 fail:
995 kfree_skb(skb);
996 return err;
999 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1001 struct sock *sk = sock->sk;
1002 int err = 0;
1004 BT_DBG("sock %p, sk %p", sock, sk);
1006 err = sock_error(sk);
1007 if (err)
1008 return err;
1010 if (msg->msg_flags & MSG_OOB)
1011 return -EOPNOTSUPP;
1013 /* Check outgoing MTU */
1014 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1015 return -EINVAL;
1017 lock_sock(sk);
1019 if (sk->sk_state == BT_CONNECTED)
1020 err = l2cap_do_send(sk, msg, len);
1021 else
1022 err = -ENOTCONN;
1024 release_sock(sk);
1025 return err;
1028 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1030 struct sock *sk = sock->sk;
1031 struct l2cap_options opts;
1032 int err = 0, len;
1033 u32 opt;
1035 BT_DBG("sk %p", sk);
1037 lock_sock(sk);
1039 switch (optname) {
1040 case L2CAP_OPTIONS:
1041 opts.imtu = l2cap_pi(sk)->imtu;
1042 opts.omtu = l2cap_pi(sk)->omtu;
1043 opts.flush_to = l2cap_pi(sk)->flush_to;
1044 opts.mode = L2CAP_MODE_BASIC;
1046 len = min_t(unsigned int, sizeof(opts), optlen);
1047 if (copy_from_user((char *) &opts, optval, len)) {
1048 err = -EFAULT;
1049 break;
1052 l2cap_pi(sk)->imtu = opts.imtu;
1053 l2cap_pi(sk)->omtu = opts.omtu;
1054 break;
1056 case L2CAP_LM:
1057 if (get_user(opt, (u32 __user *) optval)) {
1058 err = -EFAULT;
1059 break;
1062 l2cap_pi(sk)->link_mode = opt;
1063 break;
1065 default:
1066 err = -ENOPROTOOPT;
1067 break;
1070 release_sock(sk);
1071 return err;
1074 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1076 struct sock *sk = sock->sk;
1077 struct l2cap_options opts;
1078 struct l2cap_conninfo cinfo;
1079 int len, err = 0;
1081 BT_DBG("sk %p", sk);
1083 if (get_user(len, optlen))
1084 return -EFAULT;
1086 lock_sock(sk);
1088 switch (optname) {
1089 case L2CAP_OPTIONS:
1090 opts.imtu = l2cap_pi(sk)->imtu;
1091 opts.omtu = l2cap_pi(sk)->omtu;
1092 opts.flush_to = l2cap_pi(sk)->flush_to;
1093 opts.mode = L2CAP_MODE_BASIC;
1095 len = min_t(unsigned int, len, sizeof(opts));
1096 if (copy_to_user(optval, (char *) &opts, len))
1097 err = -EFAULT;
1099 break;
1101 case L2CAP_LM:
1102 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1103 err = -EFAULT;
1104 break;
1106 case L2CAP_CONNINFO:
1107 if (sk->sk_state != BT_CONNECTED) {
1108 err = -ENOTCONN;
1109 break;
1112 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1113 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1115 len = min_t(unsigned int, len, sizeof(cinfo));
1116 if (copy_to_user(optval, (char *) &cinfo, len))
1117 err = -EFAULT;
1119 break;
1121 default:
1122 err = -ENOPROTOOPT;
1123 break;
1126 release_sock(sk);
1127 return err;
1130 static int l2cap_sock_shutdown(struct socket *sock, int how)
1132 struct sock *sk = sock->sk;
1133 int err = 0;
1135 BT_DBG("sock %p, sk %p", sock, sk);
1137 if (!sk)
1138 return 0;
1140 lock_sock(sk);
1141 if (!sk->sk_shutdown) {
1142 sk->sk_shutdown = SHUTDOWN_MASK;
1143 l2cap_sock_clear_timer(sk);
1144 __l2cap_sock_close(sk, 0);
1146 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1147 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1149 release_sock(sk);
1150 return err;
1153 static int l2cap_sock_release(struct socket *sock)
1155 struct sock *sk = sock->sk;
1156 int err;
1158 BT_DBG("sock %p, sk %p", sock, sk);
1160 if (!sk)
1161 return 0;
1163 err = l2cap_sock_shutdown(sock, 2);
1165 sock_orphan(sk);
1166 l2cap_sock_kill(sk);
1167 return err;
1170 static void l2cap_chan_ready(struct sock *sk)
1172 struct sock *parent = bt_sk(sk)->parent;
1174 BT_DBG("sk %p, parent %p", sk, parent);
1176 l2cap_pi(sk)->conf_state = 0;
1177 l2cap_sock_clear_timer(sk);
1179 if (!parent) {
1180 /* Outgoing channel.
1181 * Wake up socket sleeping on connect.
1183 sk->sk_state = BT_CONNECTED;
1184 sk->sk_state_change(sk);
1185 } else {
1186 /* Incoming channel.
1187 * Wake up socket sleeping on accept.
1189 parent->sk_data_ready(parent, 0);
1193 /* Copy frame to all raw sockets on that connection */
1194 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1196 struct l2cap_chan_list *l = &conn->chan_list;
1197 struct sk_buff *nskb;
1198 struct sock * sk;
1200 BT_DBG("conn %p", conn);
1202 read_lock(&l->lock);
1203 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1204 if (sk->sk_type != SOCK_RAW)
1205 continue;
1207 /* Don't send frame to the socket it came from */
1208 if (skb->sk == sk)
1209 continue;
1211 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1212 continue;
1214 if (sock_queue_rcv_skb(sk, nskb))
1215 kfree_skb(nskb);
1217 read_unlock(&l->lock);
1220 /* ---- L2CAP signalling commands ---- */
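/* Allocate an skb for a signalling command on CID 0x0001; payloads larger
 * than conn->mtu are split into continuation fragments on frag_list. */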
1221 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1222 u8 code, u8 ident, u16 dlen, void *data)
1224 struct sk_buff *skb, **frag;
1225 struct l2cap_cmd_hdr *cmd;
1226 struct l2cap_hdr *lh;
1227 int len, count;
1229 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1231 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1232 count = min_t(unsigned int, conn->mtu, len);
1234 skb = bt_skb_alloc(count, GFP_ATOMIC);
1235 if (!skb)
1236 return NULL;
1238 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1239 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1240 lh->cid = cpu_to_le16(0x0001);
1242 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1243 cmd->code = code;
1244 cmd->ident = ident;
1245 cmd->len = cpu_to_le16(dlen);
1247 if (dlen) {
1248 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1249 memcpy(skb_put(skb, count), data, count);
1250 data += count;
1253 len -= skb->len;
1255 /* Continuation fragments (no L2CAP header) */
1256 frag = &skb_shinfo(skb)->frag_list;
1257 while (len) {
1258 count = min_t(unsigned int, conn->mtu, len);
1260 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1261 if (!*frag)
1262 goto fail;
1264 memcpy(skb_put(*frag, count), data, count);
1266 len -= count;
1267 data += count;
1269 frag = &(*frag)->next;
1272 return skb;
1274 fail:
1275 kfree_skb(skb);
1276 return NULL;
1279 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1281 struct l2cap_conf_opt *opt = *ptr;
1282 int len;
1284 len = L2CAP_CONF_OPT_SIZE + opt->len;
1285 *ptr += len;
1287 *type = opt->type;
1288 *olen = opt->len;
1290 switch (opt->len) {
1291 case 1:
1292 *val = *((u8 *) opt->val);
1293 break;
1295 case 2:
1296 *val = __le16_to_cpu(*((__le16 *) opt->val));
1297 break;
1299 case 4:
1300 *val = __le32_to_cpu(*((__le32 *) opt->val));
1301 break;
1303 default:
1304 *val = (unsigned long) opt->val;
1305 break;
1308 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1309 return len;
1312 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1314 struct l2cap_conf_opt *opt = *ptr;
1316 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1318 opt->type = type;
1319 opt->len = len;
1321 switch (len) {
1322 case 1:
1323 *((u8 *) opt->val) = val;
1324 break;
1326 case 2:
1327 *((__le16 *) opt->val) = cpu_to_le16(val);
1328 break;
1330 case 4:
1331 *((__le32 *) opt->val) = cpu_to_le32(val);
1332 break;
1334 default:
1335 memcpy(opt->val, (void *) val, len);
1336 break;
1339 *ptr += L2CAP_CONF_OPT_SIZE + len;
1342 static int l2cap_build_conf_req(struct sock *sk, void *data)
1344 struct l2cap_pinfo *pi = l2cap_pi(sk);
1345 struct l2cap_conf_req *req = data;
1346 void *ptr = req->data;
1348 BT_DBG("sk %p", sk);
1350 if (pi->imtu != L2CAP_DEFAULT_MTU)
1351 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1353 /* FIXME: Need actual value of the flush timeout */
1354 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1355 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1357 req->dcid = cpu_to_le16(pi->dcid);
1358 req->flags = cpu_to_le16(0);
1360 return ptr - data;
1363 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1366 struct l2cap_conf_rsp *rsp = data;
1367 void *ptr = rsp->data;
1368 void *req = pi->conf_req;
1369 int len = pi->conf_len;
1370 int type, hint, olen;
1371 unsigned long val;
1372 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1373 u16 mtu = L2CAP_DEFAULT_MTU;
1374 u16 result = L2CAP_CONF_SUCCESS;
1376 BT_DBG("sk %p", sk);
1378 while (len >= L2CAP_CONF_OPT_SIZE) {
1379 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1381 hint = type & 0x80;
1382 type &= 0x7f;
1384 switch (type) {
1385 case L2CAP_CONF_MTU:
1386 mtu = val;
1387 break;
1389 case L2CAP_CONF_FLUSH_TO:
1390 pi->flush_to = val;
1391 break;
1393 case L2CAP_CONF_QOS:
1394 break;
1396 case L2CAP_CONF_RFC:
1397 if (olen == sizeof(rfc))
1398 memcpy(&rfc, (void *) val, olen);
1399 break;
1401 default:
1402 if (hint)
1403 break;
1405 result = L2CAP_CONF_UNKNOWN;
1406 *((u8 *) ptr++) = type;
1407 break;
1411 if (result == L2CAP_CONF_SUCCESS) {
1412 /* Configure output options and let the other side know
1413 * which ones we don't like. */
1415 if (rfc.mode == L2CAP_MODE_BASIC) {
1416 if (mtu < pi->omtu)
1417 result = L2CAP_CONF_UNACCEPT;
1418 else {
1419 pi->omtu = mtu;
1420 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1423 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1424 } else {
1425 result = L2CAP_CONF_UNACCEPT;
1427 memset(&rfc, 0, sizeof(rfc));
1428 rfc.mode = L2CAP_MODE_BASIC;
1430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1431 sizeof(rfc), (unsigned long) &rfc);
1435 rsp->scid = cpu_to_le16(pi->dcid);
1436 rsp->result = cpu_to_le16(result);
1437 rsp->flags = cpu_to_le16(0x0000);
1439 return ptr - data;
1442 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1444 struct l2cap_conf_rsp *rsp = data;
1445 void *ptr = rsp->data;
1447 BT_DBG("sk %p", sk);
1449 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1450 rsp->result = cpu_to_le16(result);
1451 rsp->flags = cpu_to_le16(flags);
1453 return ptr - data;
1456 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1458 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1460 if (rej->reason != 0x0000)
1461 return 0;
1463 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1464 cmd->ident == conn->info_ident) {
1465 conn->info_ident = 0;
1466 del_timer(&conn->info_timer);
1467 l2cap_conn_start(conn);
1470 return 0;
1473 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1475 struct l2cap_chan_list *list = &conn->chan_list;
1476 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1477 struct l2cap_conn_rsp rsp;
1478 struct sock *sk, *parent;
1479 int result = 0, status = 0;
1481 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1482 __le16 psm = req->psm;
1484 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1486 /* Check if we have socket listening on psm */
1487 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1488 if (!parent) {
1489 result = L2CAP_CR_BAD_PSM;
1490 goto sendresp;
1493 result = L2CAP_CR_NO_MEM;
1495 /* Check for backlog size */
1496 if (sk_acceptq_is_full(parent)) {
1497 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1498 goto response;
1501 sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1502 if (!sk)
1503 goto response;
1505 write_lock_bh(&list->lock);
1507 /* Check if we already have channel with that dcid */
1508 if (__l2cap_get_chan_by_dcid(list, scid)) {
1509 write_unlock_bh(&list->lock);
1510 sock_set_flag(sk, SOCK_ZAPPED);
1511 l2cap_sock_kill(sk);
1512 goto response;
1515 hci_conn_hold(conn->hcon);
1517 l2cap_sock_init(sk, parent);
1518 bacpy(&bt_sk(sk)->src, conn->src);
1519 bacpy(&bt_sk(sk)->dst, conn->dst);
1520 l2cap_pi(sk)->psm = psm;
1521 l2cap_pi(sk)->dcid = scid;
1523 __l2cap_chan_add(conn, sk, parent);
1524 dcid = l2cap_pi(sk)->scid;
1526 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1528 /* Service level security */
1529 result = L2CAP_CR_PEND;
1530 status = L2CAP_CS_AUTHEN_PEND;
1531 sk->sk_state = BT_CONNECT2;
1532 l2cap_pi(sk)->ident = cmd->ident;
1534 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1535 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1536 if (!hci_conn_encrypt(conn->hcon))
1537 goto done;
1538 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1539 if (!hci_conn_auth(conn->hcon))
1540 goto done;
1543 sk->sk_state = BT_CONFIG;
1544 result = status = 0;
1546 done:
1547 write_unlock_bh(&list->lock);
1549 response:
1550 bh_unlock_sock(parent);
1552 sendresp:
1553 rsp.scid = cpu_to_le16(scid);
1554 rsp.dcid = cpu_to_le16(dcid);
1555 rsp.result = cpu_to_le16(result);
1556 rsp.status = cpu_to_le16(status);
1557 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1558 return 0;
1561 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1563 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1564 u16 scid, dcid, result, status;
1565 struct sock *sk;
1566 u8 req[128];
1568 scid = __le16_to_cpu(rsp->scid);
1569 dcid = __le16_to_cpu(rsp->dcid);
1570 result = __le16_to_cpu(rsp->result);
1571 status = __le16_to_cpu(rsp->status);
1573 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1575 if (scid) {
1576 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1577 return 0;
1578 } else {
1579 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1580 return 0;
1583 switch (result) {
1584 case L2CAP_CR_SUCCESS:
1585 sk->sk_state = BT_CONFIG;
1586 l2cap_pi(sk)->ident = 0;
1587 l2cap_pi(sk)->dcid = dcid;
1588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1590 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1591 l2cap_build_conf_req(sk, req), req);
1592 break;
1594 case L2CAP_CR_PEND:
1595 break;
1597 default:
1598 l2cap_chan_del(sk, ECONNREFUSED);
1599 break;
1602 bh_unlock_sock(sk);
1603 return 0;
1606 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1608 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1609 u16 dcid, flags;
1610 u8 rsp[64];
1611 struct sock *sk;
1612 int len;
1614 dcid = __le16_to_cpu(req->dcid);
1615 flags = __le16_to_cpu(req->flags);
1617 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1619 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1620 return -ENOENT;
1622 if (sk->sk_state == BT_DISCONN)
1623 goto unlock;
1625 /* Reject if config buffer is too small. */
1626 len = cmd_len - sizeof(*req);
1627 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1628 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1629 l2cap_build_conf_rsp(sk, rsp,
1630 L2CAP_CONF_REJECT, flags), rsp);
1631 goto unlock;
1634 /* Store config. */
1635 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1636 l2cap_pi(sk)->conf_len += len;
1638 if (flags & 0x0001) {
1639 /* Incomplete config. Send empty response. */
1640 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1641 l2cap_build_conf_rsp(sk, rsp,
1642 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1643 goto unlock;
1646 /* Complete config. */
1647 len = l2cap_parse_conf_req(sk, rsp);
1648 if (len < 0)
1649 goto unlock;
1651 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1653 /* Reset config buffer. */
1654 l2cap_pi(sk)->conf_len = 0;
1656 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1657 goto unlock;
1659 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1660 sk->sk_state = BT_CONNECTED;
1661 l2cap_chan_ready(sk);
1662 goto unlock;
1665 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1666 u8 req[64];
1667 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1668 l2cap_build_conf_req(sk, req), req);
1671 unlock:
1672 bh_unlock_sock(sk);
1673 return 0;
1676 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1678 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1679 u16 scid, flags, result;
1680 struct sock *sk;
1682 scid = __le16_to_cpu(rsp->scid);
1683 flags = __le16_to_cpu(rsp->flags);
1684 result = __le16_to_cpu(rsp->result);
1686 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1688 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1689 return 0;
1691 switch (result) {
1692 case L2CAP_CONF_SUCCESS:
1693 break;
1695 case L2CAP_CONF_UNACCEPT:
1696 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1697 char req[128];
1698 /* It does not make sense to adjust L2CAP parameters
1699 * that are currently defined in the spec. We simply
1700 * resend config request that we sent earlier. It is
1701 * stupid, but it helps qualification testing which
1702 * expects at least some response from us. */
1703 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1704 l2cap_build_conf_req(sk, req), req);
1705 goto done;
1708 default:
1709 sk->sk_state = BT_DISCONN;
1710 sk->sk_err = ECONNRESET;
1711 l2cap_sock_set_timer(sk, HZ * 5);
1713 struct l2cap_disconn_req req;
1714 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1715 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1716 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1717 L2CAP_DISCONN_REQ, sizeof(req), &req);
1719 goto done;
1722 if (flags & 0x01)
1723 goto done;
1725 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1727 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1728 sk->sk_state = BT_CONNECTED;
1729 l2cap_chan_ready(sk);
1732 done:
1733 bh_unlock_sock(sk);
1734 return 0;
1737 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1739 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1740 struct l2cap_disconn_rsp rsp;
1741 u16 dcid, scid;
1742 struct sock *sk;
1744 scid = __le16_to_cpu(req->scid);
1745 dcid = __le16_to_cpu(req->dcid);
1747 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1749 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1750 return 0;
1752 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1753 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1754 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1756 sk->sk_shutdown = SHUTDOWN_MASK;
1758 l2cap_chan_del(sk, ECONNRESET);
1759 bh_unlock_sock(sk);
1761 l2cap_sock_kill(sk);
1762 return 0;
1765 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1767 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1768 u16 dcid, scid;
1769 struct sock *sk;
1771 scid = __le16_to_cpu(rsp->scid);
1772 dcid = __le16_to_cpu(rsp->dcid);
1774 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1776 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1777 return 0;
1779 l2cap_chan_del(sk, 0);
1780 bh_unlock_sock(sk);
1782 l2cap_sock_kill(sk);
1783 return 0;
1786 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1788 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1789 u16 type;
1791 type = __le16_to_cpu(req->type);
1793 BT_DBG("type 0x%4.4x", type);
1795 if (type == L2CAP_IT_FEAT_MASK) {
1796 u8 buf[8];
1797 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1798 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1799 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1800 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1801 l2cap_send_cmd(conn, cmd->ident,
1802 L2CAP_INFO_RSP, sizeof(buf), buf);
1803 } else {
1804 struct l2cap_info_rsp rsp;
1805 rsp.type = cpu_to_le16(type);
1806 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1807 l2cap_send_cmd(conn, cmd->ident,
1808 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1811 return 0;
1814 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1816 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1817 u16 type, result;
1819 type = __le16_to_cpu(rsp->type);
1820 result = __le16_to_cpu(rsp->result);
1822 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1824 conn->info_ident = 0;
1826 del_timer(&conn->info_timer);
1828 if (type == L2CAP_IT_FEAT_MASK)
1829 conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
1831 l2cap_conn_start(conn);
1833 return 0;
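/* Walk every signalling command in the frame, dispatch it by opcode and
 * answer any handler error with a Command Reject. */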
1836 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1838 u8 *data = skb->data;
1839 int len = skb->len;
1840 struct l2cap_cmd_hdr cmd;
1841 int err = 0;
1843 l2cap_raw_recv(conn, skb);
1845 while (len >= L2CAP_CMD_HDR_SIZE) {
1846 u16 cmd_len;
1847 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1848 data += L2CAP_CMD_HDR_SIZE;
1849 len -= L2CAP_CMD_HDR_SIZE;
1851 cmd_len = le16_to_cpu(cmd.len);
1853 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1855 if (cmd_len > len || !cmd.ident) {
1856 BT_DBG("corrupted command");
1857 break;
1860 switch (cmd.code) {
1861 case L2CAP_COMMAND_REJ:
1862 l2cap_command_rej(conn, &cmd, data);
1863 break;
1865 case L2CAP_CONN_REQ:
1866 err = l2cap_connect_req(conn, &cmd, data);
1867 break;
1869 case L2CAP_CONN_RSP:
1870 err = l2cap_connect_rsp(conn, &cmd, data);
1871 break;
1873 case L2CAP_CONF_REQ:
1874 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1875 break;
1877 case L2CAP_CONF_RSP:
1878 err = l2cap_config_rsp(conn, &cmd, data);
1879 break;
1881 case L2CAP_DISCONN_REQ:
1882 err = l2cap_disconnect_req(conn, &cmd, data);
1883 break;
1885 case L2CAP_DISCONN_RSP:
1886 err = l2cap_disconnect_rsp(conn, &cmd, data);
1887 break;
1889 case L2CAP_ECHO_REQ:
1890 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1891 break;
1893 case L2CAP_ECHO_RSP:
1894 break;
1896 case L2CAP_INFO_REQ:
1897 err = l2cap_information_req(conn, &cmd, data);
1898 break;
1900 case L2CAP_INFO_RSP:
1901 err = l2cap_information_rsp(conn, &cmd, data);
1902 break;
1904 default:
1905 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1906 err = -EINVAL;
1907 break;
1910 if (err) {
1911 struct l2cap_cmd_rej rej;
1912 BT_DBG("error %d", err);
1914 /* FIXME: Map err to a valid reason */
1915 rej.reason = cpu_to_le16(0);
1916 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1919 data += cmd_len;
1920 len -= cmd_len;
1923 kfree_skb(skb);
1926 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1928 struct sock *sk;
1930 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1931 if (!sk) {
1932 BT_DBG("unknown cid 0x%4.4x", cid);
1933 goto drop;
1936 BT_DBG("sk %p, len %d", sk, skb->len);
1938 if (sk->sk_state != BT_CONNECTED)
1939 goto drop;
1941 if (l2cap_pi(sk)->imtu < skb->len)
1942 goto drop;
1944 /* If the socket receive buffer overflows we drop data here
1945 * which is *bad* because L2CAP has to be reliable.
1946 * But we don't have any other choice. L2CAP doesn't
1947 * provide flow control mechanism. */
1949 if (!sock_queue_rcv_skb(sk, skb))
1950 goto done;
1952 drop:
1953 kfree_skb(skb);
1955 done:
1956 if (sk)
1957 bh_unlock_sock(sk);
1959 return 0;
1962 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1964 struct sock *sk;
1966 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1967 if (!sk)
1968 goto drop;
1970 BT_DBG("sk %p, len %d", sk, skb->len);
1972 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1973 goto drop;
1975 if (l2cap_pi(sk)->imtu < skb->len)
1976 goto drop;
1978 if (!sock_queue_rcv_skb(sk, skb))
1979 goto done;
1981 drop:
1982 kfree_skb(skb);
1984 done:
1985 if (sk) bh_unlock_sock(sk);
1986 return 0;
1989 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1991 struct l2cap_hdr *lh = (void *) skb->data;
1992 u16 cid, len;
1993 __le16 psm;
1995 skb_pull(skb, L2CAP_HDR_SIZE);
1996 cid = __le16_to_cpu(lh->cid);
1997 len = __le16_to_cpu(lh->len);
1999 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2001 switch (cid) {
2002 case 0x0001:
2003 l2cap_sig_channel(conn, skb);
2004 break;
2006 case 0x0002:
2007 psm = get_unaligned((__le16 *) skb->data);
2008 skb_pull(skb, 2);
2009 l2cap_conless_channel(conn, psm, skb);
2010 break;
2012 default:
2013 l2cap_data_channel(conn, cid, skb);
2014 break;
2018 /* ---- L2CAP interface with lower layer (HCI) ---- */
2020 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2022 int exact = 0, lm1 = 0, lm2 = 0;
2023 register struct sock *sk;
2024 struct hlist_node *node;
2026 if (type != ACL_LINK)
2027 return 0;
2029 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2031 /* Find listening sockets and check their link_mode */
2032 read_lock(&l2cap_sk_list.lock);
2033 sk_for_each(sk, node, &l2cap_sk_list.head) {
2034 if (sk->sk_state != BT_LISTEN)
2035 continue;
2037 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2038 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2039 exact++;
2040 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2041 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2043 read_unlock(&l2cap_sk_list.lock);
2045 return exact ? lm1 : lm2;
2048 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2050 struct l2cap_conn *conn;
2052 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2054 if (hcon->type != ACL_LINK)
2055 return 0;
2057 if (!status) {
2058 conn = l2cap_conn_add(hcon, status);
2059 if (conn)
2060 l2cap_conn_ready(conn);
2061 } else
2062 l2cap_conn_del(hcon, bt_err(status));
2064 return 0;
2067 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2069 BT_DBG("hcon %p reason %d", hcon, reason);
2071 if (hcon->type != ACL_LINK)
2072 return 0;
2074 l2cap_conn_del(hcon, bt_err(reason));
2076 return 0;
2079 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2081 struct l2cap_chan_list *l;
2082 struct l2cap_conn *conn = hcon->l2cap_data;
2083 struct l2cap_conn_rsp rsp;
2084 struct sock *sk;
2085 int result;
2087 if (!conn)
2088 return 0;
2090 l = &conn->chan_list;
2092 BT_DBG("conn %p", conn);
2094 read_lock(&l->lock);
2096 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2097 bh_lock_sock(sk);
2099 if (sk->sk_state != BT_CONNECT2 ||
2100 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2101 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2102 bh_unlock_sock(sk);
2103 continue;
2106 if (!status) {
2107 sk->sk_state = BT_CONFIG;
2108 result = 0;
2109 } else {
2110 sk->sk_state = BT_DISCONN;
2111 l2cap_sock_set_timer(sk, HZ/10);
2112 result = L2CAP_CR_SEC_BLOCK;
2115 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2116 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2117 rsp.result = cpu_to_le16(result);
2118 rsp.status = cpu_to_le16(0);
2119 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2120 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2122 bh_unlock_sock(sk);
2125 read_unlock(&l->lock);
2126 return 0;
2129 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2131 struct l2cap_chan_list *l;
2132 struct l2cap_conn *conn = hcon->l2cap_data;
2133 struct l2cap_conn_rsp rsp;
2134 struct sock *sk;
2135 int result;
2137 if (!conn)
2138 return 0;
2140 l = &conn->chan_list;
2142 BT_DBG("conn %p", conn);
2144 read_lock(&l->lock);
2146 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2147 bh_lock_sock(sk);
2149 if (sk->sk_state != BT_CONNECT2) {
2150 bh_unlock_sock(sk);
2151 continue;
2154 if (!status) {
2155 sk->sk_state = BT_CONFIG;
2156 result = 0;
2157 } else {
2158 sk->sk_state = BT_DISCONN;
2159 l2cap_sock_set_timer(sk, HZ/10);
2160 result = L2CAP_CR_SEC_BLOCK;
2163 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2164 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2165 rsp.result = cpu_to_le16(result);
2166 rsp.status = cpu_to_le16(0);
2167 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2168 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2170 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2171 hci_conn_change_link_key(hcon);
2173 bh_unlock_sock(sk);
2176 read_unlock(&l->lock);
2177 return 0;
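/* Reassemble L2CAP frames from ACL fragments: a start fragment supplies the
 * total length from the L2CAP header, continuation fragments are copied into
 * conn->rx_skb until rx_len reaches zero, and malformed sequences mark the
 * connection unreliable. */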
2180 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2182 struct l2cap_conn *conn = hcon->l2cap_data;
2184 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2185 goto drop;
2187 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2189 if (flags & ACL_START) {
2190 struct l2cap_hdr *hdr;
2191 int len;
2193 if (conn->rx_len) {
2194 BT_ERR("Unexpected start frame (len %d)", skb->len);
2195 kfree_skb(conn->rx_skb);
2196 conn->rx_skb = NULL;
2197 conn->rx_len = 0;
2198 l2cap_conn_unreliable(conn, ECOMM);
2201 if (skb->len < 2) {
2202 BT_ERR("Frame is too short (len %d)", skb->len);
2203 l2cap_conn_unreliable(conn, ECOMM);
2204 goto drop;
2207 hdr = (struct l2cap_hdr *) skb->data;
2208 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2210 if (len == skb->len) {
2211 /* Complete frame received */
2212 l2cap_recv_frame(conn, skb);
2213 return 0;
2216 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2218 if (skb->len > len) {
2219 BT_ERR("Frame is too long (len %d, expected len %d)",
2220 skb->len, len);
2221 l2cap_conn_unreliable(conn, ECOMM);
2222 goto drop;
2225 /* Allocate skb for the complete frame (with header) */
2226 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2227 goto drop;
2229 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2230 skb->len);
2231 conn->rx_len = len - skb->len;
2232 } else {
2233 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2235 if (!conn->rx_len) {
2236 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2237 l2cap_conn_unreliable(conn, ECOMM);
2238 goto drop;
2241 if (skb->len > conn->rx_len) {
2242 BT_ERR("Fragment is too long (len %d, expected %d)",
2243 skb->len, conn->rx_len);
2244 kfree_skb(conn->rx_skb);
2245 conn->rx_skb = NULL;
2246 conn->rx_len = 0;
2247 l2cap_conn_unreliable(conn, ECOMM);
2248 goto drop;
2251 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2252 skb->len);
2253 conn->rx_len -= skb->len;
2255 if (!conn->rx_len) {
2256 /* Complete frame received */
2257 l2cap_recv_frame(conn, conn->rx_skb);
2258 conn->rx_skb = NULL;
2262 drop:
2263 kfree_skb(skb);
2264 return 0;
2267 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2269 struct sock *sk;
2270 struct hlist_node *node;
2271 char *str = buf;
2273 read_lock_bh(&l2cap_sk_list.lock);
2275 sk_for_each(sk, node, &l2cap_sk_list.head) {
2276 struct l2cap_pinfo *pi = l2cap_pi(sk);
2278 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2279 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2280 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2281 pi->imtu, pi->omtu, pi->link_mode);
2284 read_unlock_bh(&l2cap_sk_list.lock);
2286 return (str - buf);
2289 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2291 static const struct proto_ops l2cap_sock_ops = {
2292 .family = PF_BLUETOOTH,
2293 .owner = THIS_MODULE,
2294 .release = l2cap_sock_release,
2295 .bind = l2cap_sock_bind,
2296 .connect = l2cap_sock_connect,
2297 .listen = l2cap_sock_listen,
2298 .accept = l2cap_sock_accept,
2299 .getname = l2cap_sock_getname,
2300 .sendmsg = l2cap_sock_sendmsg,
2301 .recvmsg = bt_sock_recvmsg,
2302 .poll = bt_sock_poll,
2303 .mmap = sock_no_mmap,
2304 .socketpair = sock_no_socketpair,
2305 .ioctl = sock_no_ioctl,
2306 .shutdown = l2cap_sock_shutdown,
2307 .setsockopt = l2cap_sock_setsockopt,
2308 .getsockopt = l2cap_sock_getsockopt
2311 static struct net_proto_family l2cap_sock_family_ops = {
2312 .family = PF_BLUETOOTH,
2313 .owner = THIS_MODULE,
2314 .create = l2cap_sock_create,
2317 static struct hci_proto l2cap_hci_proto = {
2318 .name = "L2CAP",
2319 .id = HCI_PROTO_L2CAP,
2320 .connect_ind = l2cap_connect_ind,
2321 .connect_cfm = l2cap_connect_cfm,
2322 .disconn_ind = l2cap_disconn_ind,
2323 .auth_cfm = l2cap_auth_cfm,
2324 .encrypt_cfm = l2cap_encrypt_cfm,
2325 .recv_acldata = l2cap_recv_acldata
2328 static int __init l2cap_init(void)
2330 int err;
2332 err = proto_register(&l2cap_proto, 0);
2333 if (err < 0)
2334 return err;
2336 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2337 if (err < 0) {
2338 BT_ERR("L2CAP socket registration failed");
2339 goto error;
2342 err = hci_register_proto(&l2cap_hci_proto);
2343 if (err < 0) {
2344 BT_ERR("L2CAP protocol registration failed");
2345 bt_sock_unregister(BTPROTO_L2CAP);
2346 goto error;
2349 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2350 BT_ERR("Failed to create L2CAP info file");
2352 BT_INFO("L2CAP ver %s", VERSION);
2353 BT_INFO("L2CAP socket layer initialized");
2355 return 0;
2357 error:
2358 proto_unregister(&l2cap_proto);
2359 return err;
2362 static void __exit l2cap_exit(void)
2364 class_remove_file(bt_class, &class_attr_l2cap);
2366 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2367 BT_ERR("L2CAP socket unregistration failed");
2369 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2370 BT_ERR("L2CAP protocol unregistration failed");
2372 proto_unregister(&l2cap_proto);
2375 void l2cap_load(void)
2377 /* Dummy function to trigger automatic L2CAP module loading by
2378 * other modules that use L2CAP sockets but don't use any other
2379 * symbols from it. */
2380 return;
2382 EXPORT_SYMBOL(l2cap_load);
2384 module_init(l2cap_init);
2385 module_exit(l2cap_exit);
2387 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2388 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2389 MODULE_VERSION(VERSION);
2390 MODULE_LICENSE("GPL");
2391 MODULE_ALIAS("bt-proto-0");