[Bluetooth] Add timestamp support to L2CAP, RFCOMM and SCO
net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.10"
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
82 bh_lock_sock(sk);
83 __l2cap_sock_close(sk, ETIMEDOUT);
84 bh_unlock_sock(sk);
86 l2cap_sock_kill(sk);
87 sock_put(sk);
90 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
92 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
93 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
96 static void l2cap_sock_clear_timer(struct sock *sk)
98 BT_DBG("sock %p state %d", sk, sk->sk_state);
99 sk_stop_timer(sk, &sk->sk_timer);
102 /* ---- L2CAP channels ---- */
103 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
105 struct sock *s;
106 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
107 if (l2cap_pi(s)->dcid == cid)
108 break;
110 return s;
113 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->scid == cid)
118 break;
120 return s;
123 /* Find channel with given SCID.
124 * Returns locked socket */
125 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127 struct sock *s;
128 read_lock(&l->lock);
129 s = __l2cap_get_chan_by_scid(l, cid);
130 if (s) bh_lock_sock(s);
131 read_unlock(&l->lock);
132 return s;
135 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->ident == ident)
140 break;
142 return s;
145 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 struct sock *s;
148 read_lock(&l->lock);
149 s = __l2cap_get_chan_by_ident(l, ident);
150 if (s) bh_lock_sock(s);
151 read_unlock(&l->lock);
152 return s;
155 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
157 u16 cid = 0x0040;
159 for (; cid < 0xffff; cid++) {
160 if(!__l2cap_get_chan_by_scid(l, cid))
161 return cid;
164 return 0;
167 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
169 sock_hold(sk);
171 if (l->head)
172 l2cap_pi(l->head)->prev_c = sk;
174 l2cap_pi(sk)->next_c = l->head;
175 l2cap_pi(sk)->prev_c = NULL;
176 l->head = sk;
179 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
181 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
183 write_lock_bh(&l->lock);
184 if (sk == l->head)
185 l->head = next;
187 if (next)
188 l2cap_pi(next)->prev_c = prev;
189 if (prev)
190 l2cap_pi(prev)->next_c = next;
191 write_unlock_bh(&l->lock);
193 __sock_put(sk);
196 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
198 struct l2cap_chan_list *l = &conn->chan_list;
200 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
202 l2cap_pi(sk)->conn = conn;
204 if (sk->sk_type == SOCK_SEQPACKET) {
205 /* Alloc CID for connection-oriented socket */
206 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
207 } else if (sk->sk_type == SOCK_DGRAM) {
208 /* Connectionless socket */
209 l2cap_pi(sk)->scid = 0x0002;
210 l2cap_pi(sk)->dcid = 0x0002;
211 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
212 } else {
213 /* Raw socket can send/recv signalling messages only */
214 l2cap_pi(sk)->scid = 0x0001;
215 l2cap_pi(sk)->dcid = 0x0001;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
219 __l2cap_chan_link(l, sk);
221 if (parent)
222 bt_accept_enqueue(parent, sk);
225 /* Delete channel.
226 * Must be called on the locked socket. */
227 static void l2cap_chan_del(struct sock *sk, int err)
229 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
230 struct sock *parent = bt_sk(sk)->parent;
232 l2cap_sock_clear_timer(sk);
234 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
236 if (conn) {
237 /* Unlink from channel list */
238 l2cap_chan_unlink(&conn->chan_list, sk);
239 l2cap_pi(sk)->conn = NULL;
240 hci_conn_put(conn->hcon);
243 sk->sk_state = BT_CLOSED;
244 sock_set_flag(sk, SOCK_ZAPPED);
246 if (err)
247 sk->sk_err = err;
249 if (parent) {
250 bt_accept_unlink(sk);
251 parent->sk_data_ready(parent, 0);
252 } else
253 sk->sk_state_change(sk);
256 /* Service level security */
257 static inline int l2cap_check_link_mode(struct sock *sk)
259 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
261 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
262 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
263 return hci_conn_encrypt(conn->hcon);
265 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
266 return hci_conn_auth(conn->hcon);
268 return 1;
271 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
273 u8 id;
275 /* Get next available identifier.
276 * 1 - 128 are used by kernel.
277 * 129 - 199 are reserved.
278 * 200 - 254 are used by utilities like l2ping, etc.
279 */
281 spin_lock_bh(&conn->lock);
283 if (++conn->tx_ident > 128)
284 conn->tx_ident = 1;
286 id = conn->tx_ident;
288 spin_unlock_bh(&conn->lock);
290 return id;
293 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
295 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
297 BT_DBG("code 0x%2.2x", code);
299 if (!skb)
300 return -ENOMEM;
302 return hci_send_acl(conn->hcon, skb, 0);
305 static void l2cap_do_start(struct sock *sk)
307 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
309 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
310 struct l2cap_conn_req req;
311 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
312 req.psm = l2cap_pi(sk)->psm;
314 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
316 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
317 L2CAP_CONN_REQ, sizeof(req), &req);
318 } else {
319 struct l2cap_info_req req;
320 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
322 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
323 conn->info_ident = l2cap_get_ident(conn);
325 mod_timer(&conn->info_timer, jiffies +
326 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
328 l2cap_send_cmd(conn, conn->info_ident,
329 L2CAP_INFO_REQ, sizeof(req), &req);
333 /* ---- L2CAP connections ---- */
334 static void l2cap_conn_start(struct l2cap_conn *conn)
336 struct l2cap_chan_list *l = &conn->chan_list;
337 struct sock *sk;
339 BT_DBG("conn %p", conn);
341 read_lock(&l->lock);
343 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
344 bh_lock_sock(sk);
346 if (sk->sk_type != SOCK_SEQPACKET) {
347 bh_unlock_sock(sk);
348 continue;
351 if (sk->sk_state == BT_CONNECT) {
352 struct l2cap_conn_req req;
353 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
354 req.psm = l2cap_pi(sk)->psm;
356 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
358 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
359 L2CAP_CONN_REQ, sizeof(req), &req);
360 } else if (sk->sk_state == BT_CONNECT2) {
361 struct l2cap_conn_rsp rsp;
362 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
363 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
365 if (l2cap_check_link_mode(sk)) {
366 sk->sk_state = BT_CONFIG;
367 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
368 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
369 } else {
370 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
371 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
374 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
375 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
378 bh_unlock_sock(sk);
381 read_unlock(&l->lock);
384 static void l2cap_conn_ready(struct l2cap_conn *conn)
386 struct l2cap_chan_list *l = &conn->chan_list;
387 struct sock *sk;
389 BT_DBG("conn %p", conn);
391 read_lock(&l->lock);
393 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
394 bh_lock_sock(sk);
396 if (sk->sk_type != SOCK_SEQPACKET) {
397 l2cap_sock_clear_timer(sk);
398 sk->sk_state = BT_CONNECTED;
399 sk->sk_state_change(sk);
400 } else if (sk->sk_state == BT_CONNECT)
401 l2cap_do_start(sk);
403 bh_unlock_sock(sk);
406 read_unlock(&l->lock);
409 /* Notify sockets that we cannot guarantee reliability anymore */
410 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
412 struct l2cap_chan_list *l = &conn->chan_list;
413 struct sock *sk;
415 BT_DBG("conn %p", conn);
417 read_lock(&l->lock);
419 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
420 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
421 sk->sk_err = err;
424 read_unlock(&l->lock);
427 static void l2cap_info_timeout(unsigned long arg)
429 struct l2cap_conn *conn = (void *) arg;
431 conn->info_ident = 0;
433 l2cap_conn_start(conn);
436 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
438 struct l2cap_conn *conn = hcon->l2cap_data;
440 if (conn || status)
441 return conn;
443 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
444 if (!conn)
445 return NULL;
447 hcon->l2cap_data = conn;
448 conn->hcon = hcon;
450 BT_DBG("hcon %p conn %p", hcon, conn);
452 conn->mtu = hcon->hdev->acl_mtu;
453 conn->src = &hcon->hdev->bdaddr;
454 conn->dst = &hcon->dst;
456 conn->feat_mask = 0;
458 setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);
460 spin_lock_init(&conn->lock);
461 rwlock_init(&conn->chan_list.lock);
463 return conn;
466 static void l2cap_conn_del(struct hci_conn *hcon, int err)
468 struct l2cap_conn *conn = hcon->l2cap_data;
469 struct sock *sk;
471 if (!conn)
472 return;
474 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
476 if (conn->rx_skb)
477 kfree_skb(conn->rx_skb);
479 /* Kill channels */
480 while ((sk = conn->chan_list.head)) {
481 bh_lock_sock(sk);
482 l2cap_chan_del(sk, err);
483 bh_unlock_sock(sk);
484 l2cap_sock_kill(sk);
487 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
488 del_timer_sync(&conn->info_timer);
490 hcon->l2cap_data = NULL;
491 kfree(conn);
494 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
496 struct l2cap_chan_list *l = &conn->chan_list;
497 write_lock_bh(&l->lock);
498 __l2cap_chan_add(conn, sk, parent);
499 write_unlock_bh(&l->lock);
502 /* ---- Socket interface ---- */
503 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
505 struct sock *sk;
506 struct hlist_node *node;
507 sk_for_each(sk, node, &l2cap_sk_list.head)
508 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
509 goto found;
510 sk = NULL;
511 found:
512 return sk;
515 /* Find socket with psm and source bdaddr.
516 * Returns closest match.
517 */
518 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
520 struct sock *sk = NULL, *sk1 = NULL;
521 struct hlist_node *node;
523 sk_for_each(sk, node, &l2cap_sk_list.head) {
524 if (state && sk->sk_state != state)
525 continue;
527 if (l2cap_pi(sk)->psm == psm) {
528 /* Exact match. */
529 if (!bacmp(&bt_sk(sk)->src, src))
530 break;
532 /* Closest match */
533 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
534 sk1 = sk;
537 return node ? sk : sk1;
540 /* Find socket with given address (psm, src).
541 * Returns locked socket */
542 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
544 struct sock *s;
545 read_lock(&l2cap_sk_list.lock);
546 s = __l2cap_get_sock_by_psm(state, psm, src);
547 if (s) bh_lock_sock(s);
548 read_unlock(&l2cap_sk_list.lock);
549 return s;
552 static void l2cap_sock_destruct(struct sock *sk)
554 BT_DBG("sk %p", sk);
556 skb_queue_purge(&sk->sk_receive_queue);
557 skb_queue_purge(&sk->sk_write_queue);
560 static void l2cap_sock_cleanup_listen(struct sock *parent)
562 struct sock *sk;
564 BT_DBG("parent %p", parent);
566 /* Close not yet accepted channels */
567 while ((sk = bt_accept_dequeue(parent, NULL)))
568 l2cap_sock_close(sk);
570 parent->sk_state = BT_CLOSED;
571 sock_set_flag(parent, SOCK_ZAPPED);
574 /* Kill socket (only if zapped and orphan)
575 * Must be called on unlocked socket.
576 */
577 static void l2cap_sock_kill(struct sock *sk)
579 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
580 return;
582 BT_DBG("sk %p state %d", sk, sk->sk_state);
584 /* Kill poor orphan */
585 bt_sock_unlink(&l2cap_sk_list, sk);
586 sock_set_flag(sk, SOCK_DEAD);
587 sock_put(sk);
590 static void __l2cap_sock_close(struct sock *sk, int reason)
592 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
594 switch (sk->sk_state) {
595 case BT_LISTEN:
596 l2cap_sock_cleanup_listen(sk);
597 break;
599 case BT_CONNECTED:
600 case BT_CONFIG:
601 case BT_CONNECT2:
602 if (sk->sk_type == SOCK_SEQPACKET) {
603 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
604 struct l2cap_disconn_req req;
606 sk->sk_state = BT_DISCONN;
607 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
609 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
610 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
611 l2cap_send_cmd(conn, l2cap_get_ident(conn),
612 L2CAP_DISCONN_REQ, sizeof(req), &req);
613 } else {
614 l2cap_chan_del(sk, reason);
616 break;
618 case BT_CONNECT:
619 case BT_DISCONN:
620 l2cap_chan_del(sk, reason);
621 break;
623 default:
624 sock_set_flag(sk, SOCK_ZAPPED);
625 break;
629 /* Must be called on unlocked socket. */
630 static void l2cap_sock_close(struct sock *sk)
632 l2cap_sock_clear_timer(sk);
633 lock_sock(sk);
634 __l2cap_sock_close(sk, ECONNRESET);
635 release_sock(sk);
636 l2cap_sock_kill(sk);
639 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
641 struct l2cap_pinfo *pi = l2cap_pi(sk);
643 BT_DBG("sk %p", sk);
645 if (parent) {
646 sk->sk_type = parent->sk_type;
647 pi->imtu = l2cap_pi(parent)->imtu;
648 pi->omtu = l2cap_pi(parent)->omtu;
649 pi->link_mode = l2cap_pi(parent)->link_mode;
650 } else {
651 pi->imtu = L2CAP_DEFAULT_MTU;
652 pi->omtu = 0;
653 pi->link_mode = 0;
656 /* Default config options */
657 pi->conf_len = 0;
658 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
661 static struct proto l2cap_proto = {
662 .name = "L2CAP",
663 .owner = THIS_MODULE,
664 .obj_size = sizeof(struct l2cap_pinfo)
667 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
669 struct sock *sk;
671 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
672 if (!sk)
673 return NULL;
675 sock_init_data(sock, sk);
676 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
678 sk->sk_destruct = l2cap_sock_destruct;
679 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
681 sock_reset_flag(sk, SOCK_ZAPPED);
683 sk->sk_protocol = proto;
684 sk->sk_state = BT_OPEN;
686 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);
688 bt_sock_link(&l2cap_sk_list, sk);
689 return sk;
692 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
694 struct sock *sk;
696 BT_DBG("sock %p", sock);
698 sock->state = SS_UNCONNECTED;
700 if (sock->type != SOCK_SEQPACKET &&
701 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
702 return -ESOCKTNOSUPPORT;
704 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
705 return -EPERM;
707 sock->ops = &l2cap_sock_ops;
709 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
710 if (!sk)
711 return -ENOMEM;
713 l2cap_sock_init(sk, NULL);
714 return 0;
717 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
719 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
720 struct sock *sk = sock->sk;
721 int err = 0;
723 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
725 if (!addr || addr->sa_family != AF_BLUETOOTH)
726 return -EINVAL;
728 lock_sock(sk);
730 if (sk->sk_state != BT_OPEN) {
731 err = -EBADFD;
732 goto done;
735 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
736 !capable(CAP_NET_BIND_SERVICE)) {
737 err = -EACCES;
738 goto done;
741 write_lock_bh(&l2cap_sk_list.lock);
743 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
744 err = -EADDRINUSE;
745 } else {
746 /* Save source address */
747 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
748 l2cap_pi(sk)->psm = la->l2_psm;
749 l2cap_pi(sk)->sport = la->l2_psm;
750 sk->sk_state = BT_BOUND;
753 write_unlock_bh(&l2cap_sk_list.lock);
755 done:
756 release_sock(sk);
757 return err;
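/*
 * Illustrative user-space sketch of the bind path handled above; it is
 * not part of this module, the helper name is made up, and it assumes
 * the BlueZ socket headers <bluetooth/bluetooth.h> and
 * <bluetooth/l2cap.h>.  It mirrors what l2cap_sock_bind() checks:
 * AF_BLUETOOTH family, the PSM in little endian via htobs(), and PSMs
 * below 0x1001 reserved to CAP_NET_BIND_SERVICE.  Compiled out with #if 0.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int l2cap_bind_example(uint16_t psm)
{
	struct sockaddr_l2 addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	bacpy(&addr.l2_bdaddr, BDADDR_ANY);	/* any local adapter */
	addr.l2_psm = htobs(psm);		/* e.g. 0x1001 */

	return bind(sk, (struct sockaddr *) &addr, sizeof(addr));
}
#endif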
760 static int l2cap_do_connect(struct sock *sk)
762 bdaddr_t *src = &bt_sk(sk)->src;
763 bdaddr_t *dst = &bt_sk(sk)->dst;
764 struct l2cap_conn *conn;
765 struct hci_conn *hcon;
766 struct hci_dev *hdev;
767 int err = 0;
769 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
771 if (!(hdev = hci_get_route(dst, src)))
772 return -EHOSTUNREACH;
774 hci_dev_lock_bh(hdev);
776 err = -ENOMEM;
778 hcon = hci_connect(hdev, ACL_LINK, dst);
779 if (!hcon)
780 goto done;
782 conn = l2cap_conn_add(hcon, 0);
783 if (!conn) {
784 hci_conn_put(hcon);
785 goto done;
788 err = 0;
790 /* Update source addr of the socket */
791 bacpy(src, conn->src);
793 l2cap_chan_add(conn, sk, NULL);
795 sk->sk_state = BT_CONNECT;
796 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
798 if (hcon->state == BT_CONNECTED) {
799 if (sk->sk_type != SOCK_SEQPACKET) {
800 l2cap_sock_clear_timer(sk);
801 sk->sk_state = BT_CONNECTED;
802 } else
803 l2cap_do_start(sk);
806 done:
807 hci_dev_unlock_bh(hdev);
808 hci_dev_put(hdev);
809 return err;
812 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
814 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
815 struct sock *sk = sock->sk;
816 int err = 0;
818 lock_sock(sk);
820 BT_DBG("sk %p", sk);
822 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
823 err = -EINVAL;
824 goto done;
827 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
828 err = -EINVAL;
829 goto done;
832 switch(sk->sk_state) {
833 case BT_CONNECT:
834 case BT_CONNECT2:
835 case BT_CONFIG:
836 /* Already connecting */
837 goto wait;
839 case BT_CONNECTED:
840 /* Already connected */
841 goto done;
843 case BT_OPEN:
844 case BT_BOUND:
845 /* Can connect */
846 break;
848 default:
849 err = -EBADFD;
850 goto done;
853 /* Set destination address and psm */
854 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
855 l2cap_pi(sk)->psm = la->l2_psm;
857 if ((err = l2cap_do_connect(sk)))
858 goto done;
860 wait:
861 err = bt_sock_wait_state(sk, BT_CONNECTED,
862 sock_sndtimeo(sk, flags & O_NONBLOCK));
863 done:
864 release_sock(sk);
865 return err;
868 static int l2cap_sock_listen(struct socket *sock, int backlog)
870 struct sock *sk = sock->sk;
871 int err = 0;
873 BT_DBG("sk %p backlog %d", sk, backlog);
875 lock_sock(sk);
877 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
878 err = -EBADFD;
879 goto done;
882 if (!l2cap_pi(sk)->psm) {
883 bdaddr_t *src = &bt_sk(sk)->src;
884 u16 psm;
886 err = -EINVAL;
888 write_lock_bh(&l2cap_sk_list.lock);
890 for (psm = 0x1001; psm < 0x1100; psm += 2)
891 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
892 l2cap_pi(sk)->psm = htobs(psm);
893 l2cap_pi(sk)->sport = htobs(psm);
894 err = 0;
895 break;
898 write_unlock_bh(&l2cap_sk_list.lock);
900 if (err < 0)
901 goto done;
904 sk->sk_max_ack_backlog = backlog;
905 sk->sk_ack_backlog = 0;
906 sk->sk_state = BT_LISTEN;
908 done:
909 release_sock(sk);
910 return err;
913 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
915 DECLARE_WAITQUEUE(wait, current);
916 struct sock *sk = sock->sk, *nsk;
917 long timeo;
918 int err = 0;
920 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
922 if (sk->sk_state != BT_LISTEN) {
923 err = -EBADFD;
924 goto done;
927 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
929 BT_DBG("sk %p timeo %ld", sk, timeo);
931 /* Wait for an incoming connection. (wake-one). */
932 add_wait_queue_exclusive(sk->sk_sleep, &wait);
933 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
934 set_current_state(TASK_INTERRUPTIBLE);
935 if (!timeo) {
936 err = -EAGAIN;
937 break;
940 release_sock(sk);
941 timeo = schedule_timeout(timeo);
942 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
944 if (sk->sk_state != BT_LISTEN) {
945 err = -EBADFD;
946 break;
949 if (signal_pending(current)) {
950 err = sock_intr_errno(timeo);
951 break;
954 set_current_state(TASK_RUNNING);
955 remove_wait_queue(sk->sk_sleep, &wait);
957 if (err)
958 goto done;
960 newsock->state = SS_CONNECTED;
962 BT_DBG("new socket %p", nsk);
964 done:
965 release_sock(sk);
966 return err;
969 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
971 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
972 struct sock *sk = sock->sk;
974 BT_DBG("sock %p, sk %p", sock, sk);
976 addr->sa_family = AF_BLUETOOTH;
977 *len = sizeof(struct sockaddr_l2);
979 if (peer)
980 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
981 else
982 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
984 la->l2_psm = l2cap_pi(sk)->psm;
985 return 0;
988 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
990 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
991 struct sk_buff *skb, **frag;
992 int err, hlen, count, sent=0;
993 struct l2cap_hdr *lh;
995 BT_DBG("sk %p len %d", sk, len);
997 /* First fragment (with L2CAP header) */
998 if (sk->sk_type == SOCK_DGRAM)
999 hlen = L2CAP_HDR_SIZE + 2;
1000 else
1001 hlen = L2CAP_HDR_SIZE;
1003 count = min_t(unsigned int, (conn->mtu - hlen), len);
1005 skb = bt_skb_send_alloc(sk, hlen + count,
1006 msg->msg_flags & MSG_DONTWAIT, &err);
1007 if (!skb)
1008 return err;
1010 /* Create L2CAP header */
1011 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1012 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1013 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1015 if (sk->sk_type == SOCK_DGRAM)
1016 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1018 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1019 err = -EFAULT;
1020 goto fail;
1023 sent += count;
1024 len -= count;
1026 /* Continuation fragments (no L2CAP header) */
1027 frag = &skb_shinfo(skb)->frag_list;
1028 while (len) {
1029 count = min_t(unsigned int, conn->mtu, len);
1031 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1032 if (!*frag)
1033 goto fail;
1035 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1036 err = -EFAULT;
1037 goto fail;
1040 sent += count;
1041 len -= count;
1043 frag = &(*frag)->next;
1046 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1047 goto fail;
1049 return sent;
1051 fail:
1052 kfree_skb(skb);
1053 return err;
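/*
 * Worked example for the fragmentation above, assuming conn->mtu = 672:
 * a 1500 byte SOCK_SEQPACKET write becomes a first skb of 4 + 668 bytes
 * (L2CAP header plus payload) with continuation fragments of 672 and
 * 160 bytes chained on frag_list; lh->len carries the full 1500 so the
 * remote side can reassemble the complete L2CAP frame.
 */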
1056 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1058 struct sock *sk = sock->sk;
1059 int err = 0;
1061 BT_DBG("sock %p, sk %p", sock, sk);
1063 err = sock_error(sk);
1064 if (err)
1065 return err;
1067 if (msg->msg_flags & MSG_OOB)
1068 return -EOPNOTSUPP;
1070 /* Check outgoing MTU */
1071 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1072 return -EINVAL;
1074 lock_sock(sk);
1076 if (sk->sk_state == BT_CONNECTED)
1077 err = l2cap_do_send(sk, msg, len);
1078 else
1079 err = -ENOTCONN;
1081 release_sock(sk);
1082 return err;
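/*
 * User-space sketch of the timestamp support named in the commit
 * subject; not part of this module, and the helper name is made up.
 * With SO_TIMESTAMP enabled, the recvmsg() path (bt_sock_recvmsg(),
 * wired up as .recvmsg below) is expected to return the receive time
 * as SCM_TIMESTAMP ancillary data.  Compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/time.h>

static void read_with_timestamp(int sk)
{
	char data[672];
	union {
		char buf[CMSG_SPACE(sizeof(struct timeval))];
		struct cmsghdr align;
	} cbuf;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf.buf, .msg_controllen = sizeof(cbuf.buf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	setsockopt(sk, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

	if (recvmsg(sk, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET &&
				cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("rx at %ld.%06ld\n",
				(long) tv.tv_sec, (long) tv.tv_usec);
		}
}
#endif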
1085 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1087 struct sock *sk = sock->sk;
1088 struct l2cap_options opts;
1089 int err = 0, len;
1090 u32 opt;
1092 BT_DBG("sk %p", sk);
1094 lock_sock(sk);
1096 switch (optname) {
1097 case L2CAP_OPTIONS:
1098 opts.imtu = l2cap_pi(sk)->imtu;
1099 opts.omtu = l2cap_pi(sk)->omtu;
1100 opts.flush_to = l2cap_pi(sk)->flush_to;
1101 opts.mode = L2CAP_MODE_BASIC;
1103 len = min_t(unsigned int, sizeof(opts), optlen);
1104 if (copy_from_user((char *) &opts, optval, len)) {
1105 err = -EFAULT;
1106 break;
1109 l2cap_pi(sk)->imtu = opts.imtu;
1110 l2cap_pi(sk)->omtu = opts.omtu;
1111 break;
1113 case L2CAP_LM:
1114 if (get_user(opt, (u32 __user *) optval)) {
1115 err = -EFAULT;
1116 break;
1119 l2cap_pi(sk)->link_mode = opt;
1120 break;
1122 default:
1123 err = -ENOPROTOOPT;
1124 break;
1127 release_sock(sk);
1128 return err;
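/*
 * User-space sketch for the options handled above; not part of this
 * module, the helper name is made up, and it assumes the usual
 * SOL_L2CAP level and struct l2cap_options layout from the BlueZ
 * headers.  L2CAP_OPTIONS carries the imtu/omtu/flush_to/mode fields
 * read and written above, and L2CAP_LM takes a bitmask such as
 * L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT.  Compiled out with #if 0.
 */
#if 0
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int raise_imtu_example(int sk)
{
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);
	int lm = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;

	if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
		return -1;

	opts.imtu = 1024;	/* advertise a bigger incoming MTU */

	if (setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts)) < 0)
		return -1;

	return setsockopt(sk, SOL_L2CAP, L2CAP_LM, &lm, sizeof(lm));
}
#endif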
1131 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1133 struct sock *sk = sock->sk;
1134 struct l2cap_options opts;
1135 struct l2cap_conninfo cinfo;
1136 int len, err = 0;
1138 BT_DBG("sk %p", sk);
1140 if (get_user(len, optlen))
1141 return -EFAULT;
1143 lock_sock(sk);
1145 switch (optname) {
1146 case L2CAP_OPTIONS:
1147 opts.imtu = l2cap_pi(sk)->imtu;
1148 opts.omtu = l2cap_pi(sk)->omtu;
1149 opts.flush_to = l2cap_pi(sk)->flush_to;
1150 opts.mode = L2CAP_MODE_BASIC;
1152 len = min_t(unsigned int, len, sizeof(opts));
1153 if (copy_to_user(optval, (char *) &opts, len))
1154 err = -EFAULT;
1156 break;
1158 case L2CAP_LM:
1159 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1160 err = -EFAULT;
1161 break;
1163 case L2CAP_CONNINFO:
1164 if (sk->sk_state != BT_CONNECTED) {
1165 err = -ENOTCONN;
1166 break;
1169 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1170 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1172 len = min_t(unsigned int, len, sizeof(cinfo));
1173 if (copy_to_user(optval, (char *) &cinfo, len))
1174 err = -EFAULT;
1176 break;
1178 default:
1179 err = -ENOPROTOOPT;
1180 break;
1183 release_sock(sk);
1184 return err;
1187 static int l2cap_sock_shutdown(struct socket *sock, int how)
1189 struct sock *sk = sock->sk;
1190 int err = 0;
1192 BT_DBG("sock %p, sk %p", sock, sk);
1194 if (!sk)
1195 return 0;
1197 lock_sock(sk);
1198 if (!sk->sk_shutdown) {
1199 sk->sk_shutdown = SHUTDOWN_MASK;
1200 l2cap_sock_clear_timer(sk);
1201 __l2cap_sock_close(sk, 0);
1203 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1204 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
1206 release_sock(sk);
1207 return err;
1210 static int l2cap_sock_release(struct socket *sock)
1212 struct sock *sk = sock->sk;
1213 int err;
1215 BT_DBG("sock %p, sk %p", sock, sk);
1217 if (!sk)
1218 return 0;
1220 err = l2cap_sock_shutdown(sock, 2);
1222 sock_orphan(sk);
1223 l2cap_sock_kill(sk);
1224 return err;
1227 static void l2cap_chan_ready(struct sock *sk)
1229 struct sock *parent = bt_sk(sk)->parent;
1231 BT_DBG("sk %p, parent %p", sk, parent);
1233 l2cap_pi(sk)->conf_state = 0;
1234 l2cap_sock_clear_timer(sk);
1236 if (!parent) {
1237 /* Outgoing channel.
1238 * Wake up socket sleeping on connect.
1239 */
1240 sk->sk_state = BT_CONNECTED;
1241 sk->sk_state_change(sk);
1242 } else {
1243 /* Incoming channel.
1244 * Wake up socket sleeping on accept.
1245 */
1246 parent->sk_data_ready(parent, 0);
1250 /* Copy frame to all raw sockets on that connection */
1251 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1253 struct l2cap_chan_list *l = &conn->chan_list;
1254 struct sk_buff *nskb;
1255 struct sock * sk;
1257 BT_DBG("conn %p", conn);
1259 read_lock(&l->lock);
1260 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1261 if (sk->sk_type != SOCK_RAW)
1262 continue;
1264 /* Don't send frame to the socket it came from */
1265 if (skb->sk == sk)
1266 continue;
1268 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1269 continue;
1271 if (sock_queue_rcv_skb(sk, nskb))
1272 kfree_skb(nskb);
1274 read_unlock(&l->lock);
1277 /* ---- L2CAP signalling commands ---- */
1278 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1279 u8 code, u8 ident, u16 dlen, void *data)
1281 struct sk_buff *skb, **frag;
1282 struct l2cap_cmd_hdr *cmd;
1283 struct l2cap_hdr *lh;
1284 int len, count;
1286 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1288 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1289 count = min_t(unsigned int, conn->mtu, len);
1291 skb = bt_skb_alloc(count, GFP_ATOMIC);
1292 if (!skb)
1293 return NULL;
1295 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1296 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1297 lh->cid = cpu_to_le16(0x0001);
1299 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1300 cmd->code = code;
1301 cmd->ident = ident;
1302 cmd->len = cpu_to_le16(dlen);
1304 if (dlen) {
1305 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1306 memcpy(skb_put(skb, count), data, count);
1307 data += count;
1310 len -= skb->len;
1312 /* Continuation fragments (no L2CAP header) */
1313 frag = &skb_shinfo(skb)->frag_list;
1314 while (len) {
1315 count = min_t(unsigned int, conn->mtu, len);
1317 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1318 if (!*frag)
1319 goto fail;
1321 memcpy(skb_put(*frag, count), data, count);
1323 len -= count;
1324 data += count;
1326 frag = &(*frag)->next;
1329 return skb;
1331 fail:
1332 kfree_skb(skb);
1333 return NULL;
1336 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1338 struct l2cap_conf_opt *opt = *ptr;
1339 int len;
1341 len = L2CAP_CONF_OPT_SIZE + opt->len;
1342 *ptr += len;
1344 *type = opt->type;
1345 *olen = opt->len;
1347 switch (opt->len) {
1348 case 1:
1349 *val = *((u8 *) opt->val);
1350 break;
1352 case 2:
1353 *val = __le16_to_cpu(*((__le16 *) opt->val));
1354 break;
1356 case 4:
1357 *val = __le32_to_cpu(*((__le32 *) opt->val));
1358 break;
1360 default:
1361 *val = (unsigned long) opt->val;
1362 break;
1365 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1366 return len;
1369 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1371 struct l2cap_conf_opt *opt = *ptr;
1373 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1375 opt->type = type;
1376 opt->len = len;
1378 switch (len) {
1379 case 1:
1380 *((u8 *) opt->val) = val;
1381 break;
1383 case 2:
1384 *((__le16 *) opt->val) = cpu_to_le16(val);
1385 break;
1387 case 4:
1388 *((__le32 *) opt->val) = cpu_to_le32(val);
1389 break;
1391 default:
1392 memcpy(opt->val, (void *) val, len);
1393 break;
1396 *ptr += L2CAP_CONF_OPT_SIZE + len;
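/*
 * Encoding example for the helper above: an MTU option advertising
 * 672 bytes is emitted as the TLV 0x01 0x02 0xa0 0x02, i.e. type
 * L2CAP_CONF_MTU, length 2, then the 16 bit value 0x02a0 in little
 * endian order (values as defined by the L2CAP specification).
 */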
1399 static int l2cap_build_conf_req(struct sock *sk, void *data)
1401 struct l2cap_pinfo *pi = l2cap_pi(sk);
1402 struct l2cap_conf_req *req = data;
1403 void *ptr = req->data;
1405 BT_DBG("sk %p", sk);
1407 if (pi->imtu != L2CAP_DEFAULT_MTU)
1408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1410 /* FIXME: Need actual value of the flush timeout */
1411 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1412 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1414 req->dcid = cpu_to_le16(pi->dcid);
1415 req->flags = cpu_to_le16(0);
1417 return ptr - data;
1420 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1422 struct l2cap_pinfo *pi = l2cap_pi(sk);
1423 struct l2cap_conf_rsp *rsp = data;
1424 void *ptr = rsp->data;
1425 void *req = pi->conf_req;
1426 int len = pi->conf_len;
1427 int type, hint, olen;
1428 unsigned long val;
1429 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1430 u16 mtu = L2CAP_DEFAULT_MTU;
1431 u16 result = L2CAP_CONF_SUCCESS;
1433 BT_DBG("sk %p", sk);
1435 while (len >= L2CAP_CONF_OPT_SIZE) {
1436 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1438 hint = type & 0x80;
1439 type &= 0x7f;
1441 switch (type) {
1442 case L2CAP_CONF_MTU:
1443 mtu = val;
1444 break;
1446 case L2CAP_CONF_FLUSH_TO:
1447 pi->flush_to = val;
1448 break;
1450 case L2CAP_CONF_QOS:
1451 break;
1453 case L2CAP_CONF_RFC:
1454 if (olen == sizeof(rfc))
1455 memcpy(&rfc, (void *) val, olen);
1456 break;
1458 default:
1459 if (hint)
1460 break;
1462 result = L2CAP_CONF_UNKNOWN;
1463 *((u8 *) ptr++) = type;
1464 break;
1468 if (result == L2CAP_CONF_SUCCESS) {
1469 /* Configure output options and let the other side know
1470 * which ones we don't like. */
1472 if (rfc.mode == L2CAP_MODE_BASIC) {
1473 if (mtu < pi->omtu)
1474 result = L2CAP_CONF_UNACCEPT;
1475 else {
1476 pi->omtu = mtu;
1477 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1481 } else {
1482 result = L2CAP_CONF_UNACCEPT;
1484 memset(&rfc, 0, sizeof(rfc));
1485 rfc.mode = L2CAP_MODE_BASIC;
1487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1488 sizeof(rfc), (unsigned long) &rfc);
1492 rsp->scid = cpu_to_le16(pi->dcid);
1493 rsp->result = cpu_to_le16(result);
1494 rsp->flags = cpu_to_le16(0x0000);
1496 return ptr - data;
1499 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1501 struct l2cap_conf_rsp *rsp = data;
1502 void *ptr = rsp->data;
1504 BT_DBG("sk %p", sk);
1506 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1507 rsp->result = cpu_to_le16(result);
1508 rsp->flags = cpu_to_le16(flags);
1510 return ptr - data;
1513 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1515 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1517 if (rej->reason != 0x0000)
1518 return 0;
1520 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1521 cmd->ident == conn->info_ident) {
1522 conn->info_ident = 0;
1523 del_timer(&conn->info_timer);
1524 l2cap_conn_start(conn);
1527 return 0;
1530 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1532 struct l2cap_chan_list *list = &conn->chan_list;
1533 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1534 struct l2cap_conn_rsp rsp;
1535 struct sock *sk, *parent;
1536 int result, status = 0;
1538 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1539 __le16 psm = req->psm;
1541 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1543 /* Check if we have socket listening on psm */
1544 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1545 if (!parent) {
1546 result = L2CAP_CR_BAD_PSM;
1547 goto sendresp;
1550 result = L2CAP_CR_NO_MEM;
1552 /* Check for backlog size */
1553 if (sk_acceptq_is_full(parent)) {
1554 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1555 goto response;
1558 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1559 if (!sk)
1560 goto response;
1562 write_lock_bh(&list->lock);
1564 /* Check if we already have channel with that dcid */
1565 if (__l2cap_get_chan_by_dcid(list, scid)) {
1566 write_unlock_bh(&list->lock);
1567 sock_set_flag(sk, SOCK_ZAPPED);
1568 l2cap_sock_kill(sk);
1569 goto response;
1572 hci_conn_hold(conn->hcon);
1574 l2cap_sock_init(sk, parent);
1575 bacpy(&bt_sk(sk)->src, conn->src);
1576 bacpy(&bt_sk(sk)->dst, conn->dst);
1577 l2cap_pi(sk)->psm = psm;
1578 l2cap_pi(sk)->dcid = scid;
1580 __l2cap_chan_add(conn, sk, parent);
1581 dcid = l2cap_pi(sk)->scid;
1583 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1585 l2cap_pi(sk)->ident = cmd->ident;
1587 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1588 if (l2cap_check_link_mode(sk)) {
1589 sk->sk_state = BT_CONFIG;
1590 result = L2CAP_CR_SUCCESS;
1591 status = L2CAP_CS_NO_INFO;
1592 } else {
1593 sk->sk_state = BT_CONNECT2;
1594 result = L2CAP_CR_PEND;
1595 status = L2CAP_CS_AUTHEN_PEND;
1597 } else {
1598 sk->sk_state = BT_CONNECT2;
1599 result = L2CAP_CR_PEND;
1600 status = L2CAP_CS_NO_INFO;
1603 write_unlock_bh(&list->lock);
1605 response:
1606 bh_unlock_sock(parent);
1608 sendresp:
1609 rsp.scid = cpu_to_le16(scid);
1610 rsp.dcid = cpu_to_le16(dcid);
1611 rsp.result = cpu_to_le16(result);
1612 rsp.status = cpu_to_le16(status);
1613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1615 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1616 struct l2cap_info_req info;
1617 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1619 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1620 conn->info_ident = l2cap_get_ident(conn);
1622 mod_timer(&conn->info_timer, jiffies +
1623 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1625 l2cap_send_cmd(conn, conn->info_ident,
1626 L2CAP_INFO_REQ, sizeof(info), &info);
1629 return 0;
1632 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1634 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1635 u16 scid, dcid, result, status;
1636 struct sock *sk;
1637 u8 req[128];
1639 scid = __le16_to_cpu(rsp->scid);
1640 dcid = __le16_to_cpu(rsp->dcid);
1641 result = __le16_to_cpu(rsp->result);
1642 status = __le16_to_cpu(rsp->status);
1644 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1646 if (scid) {
1647 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1648 return 0;
1649 } else {
1650 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1651 return 0;
1654 switch (result) {
1655 case L2CAP_CR_SUCCESS:
1656 sk->sk_state = BT_CONFIG;
1657 l2cap_pi(sk)->ident = 0;
1658 l2cap_pi(sk)->dcid = dcid;
1659 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1661 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1662 l2cap_build_conf_req(sk, req), req);
1663 break;
1665 case L2CAP_CR_PEND:
1666 break;
1668 default:
1669 l2cap_chan_del(sk, ECONNREFUSED);
1670 break;
1673 bh_unlock_sock(sk);
1674 return 0;
1677 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1679 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1680 u16 dcid, flags;
1681 u8 rsp[64];
1682 struct sock *sk;
1683 int len;
1685 dcid = __le16_to_cpu(req->dcid);
1686 flags = __le16_to_cpu(req->flags);
1688 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1690 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1691 return -ENOENT;
1693 if (sk->sk_state == BT_DISCONN)
1694 goto unlock;
1696 /* Reject if config buffer is too small. */
1697 len = cmd_len - sizeof(*req);
1698 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1699 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1700 l2cap_build_conf_rsp(sk, rsp,
1701 L2CAP_CONF_REJECT, flags), rsp);
1702 goto unlock;
1705 /* Store config. */
1706 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1707 l2cap_pi(sk)->conf_len += len;
1709 if (flags & 0x0001) {
1710 /* Incomplete config. Send empty response. */
1711 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1712 l2cap_build_conf_rsp(sk, rsp,
1713 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1714 goto unlock;
1717 /* Complete config. */
1718 len = l2cap_parse_conf_req(sk, rsp);
1719 if (len < 0)
1720 goto unlock;
1722 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1724 /* Reset config buffer. */
1725 l2cap_pi(sk)->conf_len = 0;
1727 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1728 goto unlock;
1730 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1731 sk->sk_state = BT_CONNECTED;
1732 l2cap_chan_ready(sk);
1733 goto unlock;
1736 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1737 u8 buf[64];
1738 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1739 l2cap_build_conf_req(sk, buf), buf);
1742 unlock:
1743 bh_unlock_sock(sk);
1744 return 0;
1747 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1749 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1750 u16 scid, flags, result;
1751 struct sock *sk;
1753 scid = __le16_to_cpu(rsp->scid);
1754 flags = __le16_to_cpu(rsp->flags);
1755 result = __le16_to_cpu(rsp->result);
1757 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1759 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1760 return 0;
1762 switch (result) {
1763 case L2CAP_CONF_SUCCESS:
1764 break;
1766 case L2CAP_CONF_UNACCEPT:
1767 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1768 char req[128];
1769 /* It does not make sense to adjust L2CAP parameters
1770 * that are currently defined in the spec. We simply
1771 * resend config request that we sent earlier. It is
1772 * stupid, but it helps qualification testing which
1773 * expects at least some response from us. */
1774 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1775 l2cap_build_conf_req(sk, req), req);
1776 goto done;
1779 default:
1780 sk->sk_state = BT_DISCONN;
1781 sk->sk_err = ECONNRESET;
1782 l2cap_sock_set_timer(sk, HZ * 5);
1784 struct l2cap_disconn_req req;
1785 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1786 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1787 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1788 L2CAP_DISCONN_REQ, sizeof(req), &req);
1790 goto done;
1793 if (flags & 0x01)
1794 goto done;
1796 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1798 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1799 sk->sk_state = BT_CONNECTED;
1800 l2cap_chan_ready(sk);
1803 done:
1804 bh_unlock_sock(sk);
1805 return 0;
1808 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1810 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1811 struct l2cap_disconn_rsp rsp;
1812 u16 dcid, scid;
1813 struct sock *sk;
1815 scid = __le16_to_cpu(req->scid);
1816 dcid = __le16_to_cpu(req->dcid);
1818 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1820 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1821 return 0;
1823 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1824 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1825 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1827 sk->sk_shutdown = SHUTDOWN_MASK;
1829 l2cap_chan_del(sk, ECONNRESET);
1830 bh_unlock_sock(sk);
1832 l2cap_sock_kill(sk);
1833 return 0;
1836 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1838 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1839 u16 dcid, scid;
1840 struct sock *sk;
1842 scid = __le16_to_cpu(rsp->scid);
1843 dcid = __le16_to_cpu(rsp->dcid);
1845 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1847 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1848 return 0;
1850 l2cap_chan_del(sk, 0);
1851 bh_unlock_sock(sk);
1853 l2cap_sock_kill(sk);
1854 return 0;
1857 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1859 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1860 u16 type;
1862 type = __le16_to_cpu(req->type);
1864 BT_DBG("type 0x%4.4x", type);
1866 if (type == L2CAP_IT_FEAT_MASK) {
1867 u8 buf[8];
1868 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1869 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1870 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1871 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1872 l2cap_send_cmd(conn, cmd->ident,
1873 L2CAP_INFO_RSP, sizeof(buf), buf);
1874 } else {
1875 struct l2cap_info_rsp rsp;
1876 rsp.type = cpu_to_le16(type);
1877 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1878 l2cap_send_cmd(conn, cmd->ident,
1879 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1882 return 0;
1885 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1887 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1888 u16 type, result;
1890 type = __le16_to_cpu(rsp->type);
1891 result = __le16_to_cpu(rsp->result);
1893 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1895 conn->info_ident = 0;
1897 del_timer(&conn->info_timer);
1899 if (type == L2CAP_IT_FEAT_MASK)
1900 conn->feat_mask = get_unaligned_le32(rsp->data);
1902 l2cap_conn_start(conn);
1904 return 0;
1907 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1909 u8 *data = skb->data;
1910 int len = skb->len;
1911 struct l2cap_cmd_hdr cmd;
1912 int err = 0;
1914 l2cap_raw_recv(conn, skb);
1916 while (len >= L2CAP_CMD_HDR_SIZE) {
1917 u16 cmd_len;
1918 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1919 data += L2CAP_CMD_HDR_SIZE;
1920 len -= L2CAP_CMD_HDR_SIZE;
1922 cmd_len = le16_to_cpu(cmd.len);
1924 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1926 if (cmd_len > len || !cmd.ident) {
1927 BT_DBG("corrupted command");
1928 break;
1931 switch (cmd.code) {
1932 case L2CAP_COMMAND_REJ:
1933 l2cap_command_rej(conn, &cmd, data);
1934 break;
1936 case L2CAP_CONN_REQ:
1937 err = l2cap_connect_req(conn, &cmd, data);
1938 break;
1940 case L2CAP_CONN_RSP:
1941 err = l2cap_connect_rsp(conn, &cmd, data);
1942 break;
1944 case L2CAP_CONF_REQ:
1945 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1946 break;
1948 case L2CAP_CONF_RSP:
1949 err = l2cap_config_rsp(conn, &cmd, data);
1950 break;
1952 case L2CAP_DISCONN_REQ:
1953 err = l2cap_disconnect_req(conn, &cmd, data);
1954 break;
1956 case L2CAP_DISCONN_RSP:
1957 err = l2cap_disconnect_rsp(conn, &cmd, data);
1958 break;
1960 case L2CAP_ECHO_REQ:
1961 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1962 break;
1964 case L2CAP_ECHO_RSP:
1965 break;
1967 case L2CAP_INFO_REQ:
1968 err = l2cap_information_req(conn, &cmd, data);
1969 break;
1971 case L2CAP_INFO_RSP:
1972 err = l2cap_information_rsp(conn, &cmd, data);
1973 break;
1975 default:
1976 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1977 err = -EINVAL;
1978 break;
1981 if (err) {
1982 struct l2cap_cmd_rej rej;
1983 BT_DBG("error %d", err);
1985 /* FIXME: Map err to a valid reason */
1986 rej.reason = cpu_to_le16(0);
1987 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1990 data += cmd_len;
1991 len -= cmd_len;
1994 kfree_skb(skb);
1997 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1999 struct sock *sk;
2001 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2002 if (!sk) {
2003 BT_DBG("unknown cid 0x%4.4x", cid);
2004 goto drop;
2007 BT_DBG("sk %p, len %d", sk, skb->len);
2009 if (sk->sk_state != BT_CONNECTED)
2010 goto drop;
2012 if (l2cap_pi(sk)->imtu < skb->len)
2013 goto drop;
2015 /* If the socket recv buffer overflows we drop data here
2016 * which is *bad* because L2CAP has to be reliable.
2017 * But we don't have any other choice. L2CAP doesn't
2018 * provide a flow control mechanism. */
2020 if (!sock_queue_rcv_skb(sk, skb))
2021 goto done;
2023 drop:
2024 kfree_skb(skb);
2026 done:
2027 if (sk)
2028 bh_unlock_sock(sk);
2030 return 0;
2033 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2035 struct sock *sk;
2037 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2038 if (!sk)
2039 goto drop;
2041 BT_DBG("sk %p, len %d", sk, skb->len);
2043 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2044 goto drop;
2046 if (l2cap_pi(sk)->imtu < skb->len)
2047 goto drop;
2049 if (!sock_queue_rcv_skb(sk, skb))
2050 goto done;
2052 drop:
2053 kfree_skb(skb);
2055 done:
2056 if (sk) bh_unlock_sock(sk);
2057 return 0;
2060 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2062 struct l2cap_hdr *lh = (void *) skb->data;
2063 u16 cid, len;
2064 __le16 psm;
2066 skb_pull(skb, L2CAP_HDR_SIZE);
2067 cid = __le16_to_cpu(lh->cid);
2068 len = __le16_to_cpu(lh->len);
2070 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2072 switch (cid) {
2073 case 0x0001:
2074 l2cap_sig_channel(conn, skb);
2075 break;
2077 case 0x0002:
2078 psm = get_unaligned((__le16 *) skb->data);
2079 skb_pull(skb, 2);
2080 l2cap_conless_channel(conn, psm, skb);
2081 break;
2083 default:
2084 l2cap_data_channel(conn, cid, skb);
2085 break;
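/*
 * Example frame for the demultiplexing above: an 8 byte connectionless
 * SDU for PSM 0x1001 arrives as 0a 00 02 00 01 10 <8 data bytes>, i.e.
 * length 0x000a (PSM prefix plus payload), CID 0x0002, then the little
 * endian PSM that l2cap_conless_channel() uses to locate a matching
 * socket.
 */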
2089 /* ---- L2CAP interface with lower layer (HCI) ---- */
2091 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2093 int exact = 0, lm1 = 0, lm2 = 0;
2094 register struct sock *sk;
2095 struct hlist_node *node;
2097 if (type != ACL_LINK)
2098 return 0;
2100 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2102 /* Find listening sockets and check their link_mode */
2103 read_lock(&l2cap_sk_list.lock);
2104 sk_for_each(sk, node, &l2cap_sk_list.head) {
2105 if (sk->sk_state != BT_LISTEN)
2106 continue;
2108 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2109 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2110 exact++;
2111 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2112 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2114 read_unlock(&l2cap_sk_list.lock);
2116 return exact ? lm1 : lm2;
2119 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2121 struct l2cap_conn *conn;
2123 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2125 if (hcon->type != ACL_LINK)
2126 return 0;
2128 if (!status) {
2129 conn = l2cap_conn_add(hcon, status);
2130 if (conn)
2131 l2cap_conn_ready(conn);
2132 } else
2133 l2cap_conn_del(hcon, bt_err(status));
2135 return 0;
2138 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2140 BT_DBG("hcon %p reason %d", hcon, reason);
2142 if (hcon->type != ACL_LINK)
2143 return 0;
2145 l2cap_conn_del(hcon, bt_err(reason));
2147 return 0;
2150 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2152 struct l2cap_chan_list *l;
2153 struct l2cap_conn *conn = hcon->l2cap_data;
2154 struct l2cap_conn_rsp rsp;
2155 struct sock *sk;
2156 int result;
2158 if (!conn)
2159 return 0;
2161 l = &conn->chan_list;
2163 BT_DBG("conn %p", conn);
2165 read_lock(&l->lock);
2167 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2168 struct l2cap_pinfo *pi = l2cap_pi(sk);
2170 bh_lock_sock(sk);
2172 if (sk->sk_state != BT_CONNECT2) {
2173 bh_unlock_sock(sk);
2174 continue;
2177 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2178 !(hcon->link_mode & HCI_LM_ENCRYPT)) {
2179 bh_unlock_sock(sk);
2180 continue;
2183 if (!status) {
2184 sk->sk_state = BT_CONFIG;
2185 result = 0;
2186 } else {
2187 sk->sk_state = BT_DISCONN;
2188 l2cap_sock_set_timer(sk, HZ/10);
2189 result = L2CAP_CR_SEC_BLOCK;
2192 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2193 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2194 rsp.result = cpu_to_le16(result);
2195 rsp.status = cpu_to_le16(0);
2196 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2197 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2199 bh_unlock_sock(sk);
2202 read_unlock(&l->lock);
2203 return 0;
2206 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2208 struct l2cap_chan_list *l;
2209 struct l2cap_conn *conn = hcon->l2cap_data;
2210 struct l2cap_conn_rsp rsp;
2211 struct sock *sk;
2212 int result;
2214 if (!conn)
2215 return 0;
2217 l = &conn->chan_list;
2219 BT_DBG("conn %p", conn);
2221 read_lock(&l->lock);
2223 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2224 struct l2cap_pinfo *pi = l2cap_pi(sk);
2226 bh_lock_sock(sk);
2228 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2229 (sk->sk_state == BT_CONNECTED ||
2230 sk->sk_state == BT_CONFIG) &&
2231 !status && encrypt == 0x00) {
2232 __l2cap_sock_close(sk, ECONNREFUSED);
2233 bh_unlock_sock(sk);
2234 continue;
2237 if (sk->sk_state != BT_CONNECT2) {
2238 bh_unlock_sock(sk);
2239 continue;
2242 if (!status) {
2243 sk->sk_state = BT_CONFIG;
2244 result = 0;
2245 } else {
2246 sk->sk_state = BT_DISCONN;
2247 l2cap_sock_set_timer(sk, HZ/10);
2248 result = L2CAP_CR_SEC_BLOCK;
2251 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2252 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2253 rsp.result = cpu_to_le16(result);
2254 rsp.status = cpu_to_le16(0);
2255 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2256 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2258 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2259 hci_conn_change_link_key(hcon);
2261 bh_unlock_sock(sk);
2264 read_unlock(&l->lock);
2265 return 0;
2268 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2270 struct l2cap_conn *conn = hcon->l2cap_data;
2272 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2273 goto drop;
2275 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2277 if (flags & ACL_START) {
2278 struct l2cap_hdr *hdr;
2279 int len;
2281 if (conn->rx_len) {
2282 BT_ERR("Unexpected start frame (len %d)", skb->len);
2283 kfree_skb(conn->rx_skb);
2284 conn->rx_skb = NULL;
2285 conn->rx_len = 0;
2286 l2cap_conn_unreliable(conn, ECOMM);
2289 if (skb->len < 2) {
2290 BT_ERR("Frame is too short (len %d)", skb->len);
2291 l2cap_conn_unreliable(conn, ECOMM);
2292 goto drop;
2295 hdr = (struct l2cap_hdr *) skb->data;
2296 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2298 if (len == skb->len) {
2299 /* Complete frame received */
2300 l2cap_recv_frame(conn, skb);
2301 return 0;
2304 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2306 if (skb->len > len) {
2307 BT_ERR("Frame is too long (len %d, expected len %d)",
2308 skb->len, len);
2309 l2cap_conn_unreliable(conn, ECOMM);
2310 goto drop;
2313 /* Allocate skb for the complete frame (with header) */
2314 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2315 goto drop;
2317 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2318 skb->len);
2319 conn->rx_len = len - skb->len;
2320 } else {
2321 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2323 if (!conn->rx_len) {
2324 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2325 l2cap_conn_unreliable(conn, ECOMM);
2326 goto drop;
2329 if (skb->len > conn->rx_len) {
2330 BT_ERR("Fragment is too long (len %d, expected %d)",
2331 skb->len, conn->rx_len);
2332 kfree_skb(conn->rx_skb);
2333 conn->rx_skb = NULL;
2334 conn->rx_len = 0;
2335 l2cap_conn_unreliable(conn, ECOMM);
2336 goto drop;
2339 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2340 skb->len);
2341 conn->rx_len -= skb->len;
2343 if (!conn->rx_len) {
2344 /* Complete frame received */
2345 l2cap_recv_frame(conn, conn->rx_skb);
2346 conn->rx_skb = NULL;
2350 drop:
2351 kfree_skb(skb);
2352 return 0;
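/*
 * Reassembly example for the code above, assuming an ACL buffer size of
 * 339 bytes: a 1000 byte L2CAP frame (hdr->len = 996) arrives as an
 * ACL_START fragment of 339 bytes plus continuation fragments of 339
 * and 322 bytes; rx_len counts down 661, 322, 0 and the completed
 * rx_skb is then handed to l2cap_recv_frame().
 */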
2355 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2357 struct sock *sk;
2358 struct hlist_node *node;
2359 char *str = buf;
2361 read_lock_bh(&l2cap_sk_list.lock);
2363 sk_for_each(sk, node, &l2cap_sk_list.head) {
2364 struct l2cap_pinfo *pi = l2cap_pi(sk);
2366 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2367 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2368 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2369 pi->imtu, pi->omtu, pi->link_mode);
2372 read_unlock_bh(&l2cap_sk_list.lock);
2374 return (str - buf);
2377 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2379 static const struct proto_ops l2cap_sock_ops = {
2380 .family = PF_BLUETOOTH,
2381 .owner = THIS_MODULE,
2382 .release = l2cap_sock_release,
2383 .bind = l2cap_sock_bind,
2384 .connect = l2cap_sock_connect,
2385 .listen = l2cap_sock_listen,
2386 .accept = l2cap_sock_accept,
2387 .getname = l2cap_sock_getname,
2388 .sendmsg = l2cap_sock_sendmsg,
2389 .recvmsg = bt_sock_recvmsg,
2390 .poll = bt_sock_poll,
2391 .ioctl = bt_sock_ioctl,
2392 .mmap = sock_no_mmap,
2393 .socketpair = sock_no_socketpair,
2394 .shutdown = l2cap_sock_shutdown,
2395 .setsockopt = l2cap_sock_setsockopt,
2396 .getsockopt = l2cap_sock_getsockopt
2399 static struct net_proto_family l2cap_sock_family_ops = {
2400 .family = PF_BLUETOOTH,
2401 .owner = THIS_MODULE,
2402 .create = l2cap_sock_create,
2405 static struct hci_proto l2cap_hci_proto = {
2406 .name = "L2CAP",
2407 .id = HCI_PROTO_L2CAP,
2408 .connect_ind = l2cap_connect_ind,
2409 .connect_cfm = l2cap_connect_cfm,
2410 .disconn_ind = l2cap_disconn_ind,
2411 .auth_cfm = l2cap_auth_cfm,
2412 .encrypt_cfm = l2cap_encrypt_cfm,
2413 .recv_acldata = l2cap_recv_acldata
2416 static int __init l2cap_init(void)
2418 int err;
2420 err = proto_register(&l2cap_proto, 0);
2421 if (err < 0)
2422 return err;
2424 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2425 if (err < 0) {
2426 BT_ERR("L2CAP socket registration failed");
2427 goto error;
2430 err = hci_register_proto(&l2cap_hci_proto);
2431 if (err < 0) {
2432 BT_ERR("L2CAP protocol registration failed");
2433 bt_sock_unregister(BTPROTO_L2CAP);
2434 goto error;
2437 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2438 BT_ERR("Failed to create L2CAP info file");
2440 BT_INFO("L2CAP ver %s", VERSION);
2441 BT_INFO("L2CAP socket layer initialized");
2443 return 0;
2445 error:
2446 proto_unregister(&l2cap_proto);
2447 return err;
2450 static void __exit l2cap_exit(void)
2452 class_remove_file(bt_class, &class_attr_l2cap);
2454 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2455 BT_ERR("L2CAP socket unregistration failed");
2457 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2458 BT_ERR("L2CAP protocol unregistration failed");
2460 proto_unregister(&l2cap_proto);
2463 void l2cap_load(void)
2465 /* Dummy function to trigger automatic L2CAP module loading by
2466 * other modules that use L2CAP sockets but don't use any other
2467 * symbols from it. */
2468 return;
2470 EXPORT_SYMBOL(l2cap_load);
2472 module_init(l2cap_init);
2473 module_exit(l2cap_exit);
2475 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2476 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2477 MODULE_VERSION(VERSION);
2478 MODULE_LICENSE("GPL");
2479 MODULE_ALIAS("bt-proto-0");