/* net/bluetooth/l2cap.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core and sockets. */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#ifndef CONFIG_BT_L2CAP_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

#define VERSION "2.9"

static u32 l2cap_feat_mask = 0x0000;

static const struct proto_ops l2cap_sock_ops;

static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);
	__l2cap_sock_close(sk, ETIMEDOUT);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
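/* Note: sk_timer is armed with the socket send timeout whenever a state
 * transition (connect, disconnect) is started and cleared once it completes;
 * if it fires, l2cap_sock_timeout() closes the channel with ETIMEDOUT from
 * timer (BH) context, which is why it uses bh_lock_sock(). */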
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)
			break;
	}
	return s;
}

static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)
			break;
	}
	return s;
}

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}

static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)
			break;
	}
	return s;
}

static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s) bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
	u16 cid = 0x0040;

	for (; cid < 0xffff; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
			return cid;
	}

	return 0;
}
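/* Channel identifiers below 0x0040 are reserved by the L2CAP specification
 * (0x0001 is the signalling channel, 0x0002 connectionless reception), so
 * dynamically allocated source CIDs start at 0x0040; the scan above simply
 * returns the first CID not already in use on this connection. */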
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}

static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}

static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
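/* All signalling commands (connect/config/disconnect/echo/info requests and
 * responses) are built by l2cap_build_cmd() as a single C-frame on the
 * signalling CID 0x0001 and handed straight to the ACL link with
 * hci_send_acl(); there is no additional queueing at the L2CAP level. */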
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
		struct l2cap_info_req req;

		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer,
			jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
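/* When the ACL link comes up, an L2CAP_INFO_REQ for the feature mask is sent
 * first and pending channel setup is deferred until the response (or the info
 * timer) arrives; l2cap_conn_start() then issues the actual L2CAP_CONN_REQ
 * commands for sockets still in BT_CONNECT. */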
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}

static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	return conn;
}
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	if (conn->rx_skb)
		kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}

static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
/* ---- Socket interface ---- */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
{
	struct sock *sk;
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
			goto found;
	sk = NULL;
found:
	return sk;
}

/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}
/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s) bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
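/* PSM lookup order: an exact match on the bound source address wins,
 * otherwise a socket bound to BDADDR_ANY for the same PSM is used as the
 * wildcard fallback (see __l2cap_get_sock_by_psm() above). */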
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else {
			l2cap_chan_del(sk, reason);
		}
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
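/* Close behaviour depends on the channel state: established or half-configured
 * SOCK_SEQPACKET channels send an L2CAP_DISCONN_REQ and wait in BT_DISCONN for
 * the response (bounded by the socket send timeout), while channels that never
 * completed setup are torn down immediately via l2cap_chan_del(). */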
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}

static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->link_mode = l2cap_pi(parent)->link_mode;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->link_mode = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};

static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
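/* Illustrative userspace usage sketch, not part of this file: a
 * connection-oriented L2CAP socket is set up roughly like
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	bacpy(&addr.l2_bdaddr, &remote_bdaddr);	// remote_bdaddr: peer address (assumed)
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * which ends up in l2cap_sock_connect() -> l2cap_do_connect() below. */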
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
			!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
		l2cap_pi(sk)->psm = la->l2_psm;
		l2cap_pi(sk)->sport = la->l2_psm;
		sk->sk_state = BT_BOUND;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	hcon = hci_connect(hdev, ACL_LINK, dst);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
			l2cap_conn_ready(conn);
			goto done;
		}

		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		}
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
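/* Outgoing connect path: resolve the local adapter with hci_get_route(),
 * create or reuse the ACL link with hci_connect(), attach the socket to the
 * l2cap_conn, and either send L2CAP_CONN_REQ right away (ACL already up and
 * feature exchange finished) or leave it to l2cap_conn_ready() /
 * l2cap_conn_start() once the link and the info exchange complete. */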
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	lock_sock(sk);

	BT_DBG("sk %p", sk);

	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
		err = -EINVAL;
		goto done;
	}

	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;

	if ((err = l2cap_do_connect(sk)))
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
				l2cap_pi(sk)->psm = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
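/* If the socket was bound without a PSM, listen() picks the first free PSM in
 * the dynamic range 0x1001-0x10ff, stepping by two so the value stays odd as
 * the spec requires; PSMs below 0x1001 are reserved for well-known services
 * and binding them needs CAP_NET_BIND_SERVICE (see l2cap_sock_bind() above). */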
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer)
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
	else
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);

	la->l2_psm = l2cap_pi(sk)->psm;
	return 0;
}
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent = 0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}
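/* Outgoing data is packed into one basic-mode L2CAP frame: the first skb
 * carries the 4-byte L2CAP header (plus the 2-byte PSM for connectionless
 * SOCK_DGRAM traffic) and at most (ACL MTU - header) bytes of payload; the
 * rest of the message is chained as header-less fragments on frag_list and
 * handed to hci_send_acl(), which transmits them as ACL packets. */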
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int err = 0, len;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = L2CAP_MODE_BASIC;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->link_mode = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = L2CAP_MODE_BASIC;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}

static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & 0x80;
		type &= 0x7f;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (rfc.mode == L2CAP_MODE_BASIC) {
			if (mtu < pi->omtu)
				result = L2CAP_CONF_UNACCEPT;
			else {
				pi->omtu = mtu;
				pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
			}

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
		} else {
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = L2CAP_MODE_BASIC;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
						sizeof(rfc), (unsigned long) &rfc);
		}
	}

	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
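/* Configuration negotiation here only understands basic L2CAP mode: the
 * peer's proposed MTU is rejected if it is below the current outgoing MTU and
 * otherwise adopted, any RFC option asking for another mode is answered with
 * L2CAP_CONF_UNACCEPT plus a basic-mode RFC option, and unknown options that
 * are not hints are collected and reported back as L2CAP_CONF_UNKNOWN. */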
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}

static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		conn->info_ident = 0;
		del_timer(&conn->info_timer);
		l2cap_conn_start(conn);
	}

	return 0;
}
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;

	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		if (!hci_conn_encrypt(conn->hcon))
			goto done;
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
			goto done;
	}

	sk->sk_state = BT_CONFIG;
	result = status = 0;

done:
	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	return 0;
}
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
			return 0;
	} else {
		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0)
		goto unlock;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 req[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(sk, req), req);
			goto done;
		}

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			struct l2cap_disconn_req req;
			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		}
		goto done;
	}

	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	conn->info_ident = 0;

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK)
		conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));

	l2cap_conn_start(conn);

	return 0;
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If socket recv buffers overflows we drop data here
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}

static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk) bh_unlock_sock(sk);
	return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case 0x0001:
		l2cap_sig_channel(conn, skb);
		break;

	case 0x0002:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
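/* Incoming frames are demultiplexed on the CID from the basic L2CAP header:
 * 0x0001 goes to the signalling handler, 0x0002 is connectionless traffic
 * (a 2-byte PSM follows the header), and every other CID is looked up as a
 * connection-oriented data channel. */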
/* ---- L2CAP interface with lower layer (HCI) ---- */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
			lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)
		return 0;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_err(status));

	return 0;
}

static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return 0;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_state != BT_CONNECT2 ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
				(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_state != BT_CONNECT2) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
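/* HCI hands us ACL fragments: a start fragment that already contains
 * hdr->len + L2CAP_HDR_SIZE bytes is processed immediately, otherwise it is
 * copied into conn->rx_skb and completed by continuation fragments. rx_len
 * tracks how many bytes are still missing; any inconsistency (unexpected
 * start/continuation, oversized fragment) marks the connection unreliable
 * with ECOMM and drops the partial frame. */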
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
				pi->imtu, pi->omtu, pi->link_mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return (str - buf);
}

static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= bt_sock_recvmsg,
	.poll		= bt_sock_poll,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.ioctl		= sock_no_ioctl,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.auth_cfm	= l2cap_auth_cfm,
	.encrypt_cfm	= l2cap_encrypt_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}

static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
	return;
}
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");