net/bluetooth/l2cap.c (from linux-2.6/pdupreez.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
58 #define VERSION "2.10"
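/* Feature mask advertised in information responses. 0x0000 means none of
 * the optional L2CAP features (retransmission, flow control, FCS) are
 * supported, i.e. Basic Mode only. */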
60 static u32 l2cap_feat_mask = 0x0000;
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg)
78 struct sock *sk = (struct sock *) arg;
79 int reason;
81 BT_DBG("sock %p state %d", sk, sk->sk_state);
83 bh_lock_sock(sk);
85 if (sk->sk_state == BT_CONNECT &&
86 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
87 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
88 reason = ECONNREFUSED;
89 else
90 reason = ETIMEDOUT;
92 __l2cap_sock_close(sk, reason);
94 bh_unlock_sock(sk);
96 l2cap_sock_kill(sk);
97 sock_put(sk);
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
120 return s;
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
130 return s;
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s) bh_lock_sock(s);
141 read_unlock(&l->lock);
142 return s;
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 struct sock *s;
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
150 break;
152 return s;
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 struct sock *s;
158 read_lock(&l->lock);
159 s = __l2cap_get_chan_by_ident(l, ident);
160 if (s) bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
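/* Allocate a dynamic channel ID. CIDs 0x0001-0x003f are reserved by the
 * L2CAP spec (0x0001 signalling, 0x0002 connectionless), so dynamically
 * allocated channels start at 0x0040. */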
165 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
167 u16 cid = 0x0040;
169 for (; cid < 0xffff; cid++) {
170 if (!__l2cap_get_chan_by_scid(l, cid))
171 return cid;
174 return 0;
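/* Channels of a connection are kept on a doubly linked list threaded
 * through l2cap_pinfo (next_c/prev_c) and protected by chan_list.lock;
 * each linked socket holds an extra reference (sock_hold/__sock_put). */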
177 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
179 sock_hold(sk);
181 if (l->head)
182 l2cap_pi(l->head)->prev_c = sk;
184 l2cap_pi(sk)->next_c = l->head;
185 l2cap_pi(sk)->prev_c = NULL;
186 l->head = sk;
189 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
191 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
193 write_lock_bh(&l->lock);
194 if (sk == l->head)
195 l->head = next;
197 if (next)
198 l2cap_pi(next)->prev_c = prev;
199 if (prev)
200 l2cap_pi(prev)->next_c = next;
201 write_unlock_bh(&l->lock);
203 __sock_put(sk);
206 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
208 struct l2cap_chan_list *l = &conn->chan_list;
210 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
212 l2cap_pi(sk)->conn = conn;
214 if (sk->sk_type == SOCK_SEQPACKET) {
215 /* Alloc CID for connection-oriented socket */
216 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
217 } else if (sk->sk_type == SOCK_DGRAM) {
218 /* Connectionless socket */
219 l2cap_pi(sk)->scid = 0x0002;
220 l2cap_pi(sk)->dcid = 0x0002;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
222 } else {
223 /* Raw socket can send/recv signalling messages only */
224 l2cap_pi(sk)->scid = 0x0001;
225 l2cap_pi(sk)->dcid = 0x0001;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
229 __l2cap_chan_link(l, sk);
231 if (parent)
232 bt_accept_enqueue(parent, sk);
235 /* Delete channel.
236 * Must be called on the locked socket. */
237 static void l2cap_chan_del(struct sock *sk, int err)
239 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
240 struct sock *parent = bt_sk(sk)->parent;
242 l2cap_sock_clear_timer(sk);
244 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
246 if (conn) {
247 /* Unlink from channel list */
248 l2cap_chan_unlink(&conn->chan_list, sk);
249 l2cap_pi(sk)->conn = NULL;
250 hci_conn_put(conn->hcon);
253 sk->sk_state = BT_CLOSED;
254 sock_set_flag(sk, SOCK_ZAPPED);
256 if (err)
257 sk->sk_err = err;
259 if (parent) {
260 bt_accept_unlink(sk);
261 parent->sk_data_ready(parent, 0);
262 } else
263 sk->sk_state_change(sk);
266 /* Service level security */
267 static inline int l2cap_check_link_mode(struct sock *sk)
269 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
271 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
272 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
273 return hci_conn_encrypt(conn->hcon);
275 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
276 return hci_conn_auth(conn->hcon);
278 return 1;
281 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
283 u8 id;
285 /* Get next available identifier.
286 * 1 - 128 are used by kernel.
287 * 129 - 199 are reserved.
288 * 200 - 254 are used by utilities like l2ping, etc.
289 */
291 spin_lock_bh(&conn->lock);
293 if (++conn->tx_ident > 128)
294 conn->tx_ident = 1;
296 id = conn->tx_ident;
298 spin_unlock_bh(&conn->lock);
300 return id;
303 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
305 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
307 BT_DBG("code 0x%2.2x", code);
309 if (!skb)
310 return -ENOMEM;
312 return hci_send_acl(conn->hcon, skb, 0);
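/* Kick off channel setup. If the feature mask information request has
 * already been sent on this connection (and the security checks pass),
 * send the connect request right away; otherwise send the info request
 * first and let l2cap_conn_start() retry the connect when the info
 * response arrives or the info timer expires. */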
315 static void l2cap_do_start(struct sock *sk)
317 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
320 if (l2cap_check_link_mode(sk)) {
321 struct l2cap_conn_req req;
322 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
323 req.psm = l2cap_pi(sk)->psm;
325 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
327 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
328 L2CAP_CONN_REQ, sizeof(req), &req);
330 } else {
331 struct l2cap_info_req req;
332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
335 conn->info_ident = l2cap_get_ident(conn);
337 mod_timer(&conn->info_timer, jiffies +
338 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
340 l2cap_send_cmd(conn, conn->info_ident,
341 L2CAP_INFO_REQ, sizeof(req), &req);
345 /* ---- L2CAP connections ---- */
346 static void l2cap_conn_start(struct l2cap_conn *conn)
348 struct l2cap_chan_list *l = &conn->chan_list;
349 struct sock *sk;
351 BT_DBG("conn %p", conn);
353 read_lock(&l->lock);
355 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
356 bh_lock_sock(sk);
358 if (sk->sk_type != SOCK_SEQPACKET) {
359 bh_unlock_sock(sk);
360 continue;
363 if (sk->sk_state == BT_CONNECT) {
364 if (l2cap_check_link_mode(sk)) {
365 struct l2cap_conn_req req;
366 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
367 req.psm = l2cap_pi(sk)->psm;
369 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
371 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
372 L2CAP_CONN_REQ, sizeof(req), &req);
374 } else if (sk->sk_state == BT_CONNECT2) {
375 struct l2cap_conn_rsp rsp;
376 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
377 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
379 if (l2cap_check_link_mode(sk)) {
380 sk->sk_state = BT_CONFIG;
381 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
382 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
383 } else {
384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
388 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
389 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
392 bh_unlock_sock(sk);
395 read_unlock(&l->lock);
398 static void l2cap_conn_ready(struct l2cap_conn *conn)
400 struct l2cap_chan_list *l = &conn->chan_list;
401 struct sock *sk;
403 BT_DBG("conn %p", conn);
405 read_lock(&l->lock);
407 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
408 bh_lock_sock(sk);
410 if (sk->sk_type != SOCK_SEQPACKET) {
411 l2cap_sock_clear_timer(sk);
412 sk->sk_state = BT_CONNECTED;
413 sk->sk_state_change(sk);
414 } else if (sk->sk_state == BT_CONNECT)
415 l2cap_do_start(sk);
417 bh_unlock_sock(sk);
420 read_unlock(&l->lock);
423 /* Notify sockets that we cannot guarantee reliability anymore */
424 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
426 struct l2cap_chan_list *l = &conn->chan_list;
427 struct sock *sk;
429 BT_DBG("conn %p", conn);
431 read_lock(&l->lock);
433 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
434 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
435 sk->sk_err = err;
438 read_unlock(&l->lock);
441 static void l2cap_info_timeout(unsigned long arg)
443 struct l2cap_conn *conn = (void *) arg;
445 conn->info_ident = 0;
447 l2cap_conn_start(conn);
450 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
452 struct l2cap_conn *conn = hcon->l2cap_data;
454 if (conn || status)
455 return conn;
457 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
458 if (!conn)
459 return NULL;
461 hcon->l2cap_data = conn;
462 conn->hcon = hcon;
464 BT_DBG("hcon %p conn %p", hcon, conn);
466 conn->mtu = hcon->hdev->acl_mtu;
467 conn->src = &hcon->hdev->bdaddr;
468 conn->dst = &hcon->dst;
470 conn->feat_mask = 0;
472 setup_timer(&conn->info_timer, l2cap_info_timeout,
473 (unsigned long) conn);
475 spin_lock_init(&conn->lock);
476 rwlock_init(&conn->chan_list.lock);
478 return conn;
481 static void l2cap_conn_del(struct hci_conn *hcon, int err)
483 struct l2cap_conn *conn = hcon->l2cap_data;
484 struct sock *sk;
486 if (!conn)
487 return;
489 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
491 if (conn->rx_skb)
492 kfree_skb(conn->rx_skb);
494 /* Kill channels */
495 while ((sk = conn->chan_list.head)) {
496 bh_lock_sock(sk);
497 l2cap_chan_del(sk, err);
498 bh_unlock_sock(sk);
499 l2cap_sock_kill(sk);
502 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
503 del_timer_sync(&conn->info_timer);
505 hcon->l2cap_data = NULL;
506 kfree(conn);
509 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
511 struct l2cap_chan_list *l = &conn->chan_list;
512 write_lock_bh(&l->lock);
513 __l2cap_chan_add(conn, sk, parent);
514 write_unlock_bh(&l->lock);
517 /* ---- Socket interface ---- */
518 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
520 struct sock *sk;
521 struct hlist_node *node;
522 sk_for_each(sk, node, &l2cap_sk_list.head)
523 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
524 goto found;
525 sk = NULL;
526 found:
527 return sk;
530 /* Find socket with psm and source bdaddr.
531 * Returns closest match.
532 */
533 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
535 struct sock *sk = NULL, *sk1 = NULL;
536 struct hlist_node *node;
538 sk_for_each(sk, node, &l2cap_sk_list.head) {
539 if (state && sk->sk_state != state)
540 continue;
542 if (l2cap_pi(sk)->psm == psm) {
543 /* Exact match. */
544 if (!bacmp(&bt_sk(sk)->src, src))
545 break;
547 /* Closest match */
548 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
549 sk1 = sk;
552 return node ? sk : sk1;
555 /* Find socket with given address (psm, src).
556 * Returns locked socket */
557 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
559 struct sock *s;
560 read_lock(&l2cap_sk_list.lock);
561 s = __l2cap_get_sock_by_psm(state, psm, src);
562 if (s) bh_lock_sock(s);
563 read_unlock(&l2cap_sk_list.lock);
564 return s;
567 static void l2cap_sock_destruct(struct sock *sk)
569 BT_DBG("sk %p", sk);
571 skb_queue_purge(&sk->sk_receive_queue);
572 skb_queue_purge(&sk->sk_write_queue);
575 static void l2cap_sock_cleanup_listen(struct sock *parent)
577 struct sock *sk;
579 BT_DBG("parent %p", parent);
581 /* Close not yet accepted channels */
582 while ((sk = bt_accept_dequeue(parent, NULL)))
583 l2cap_sock_close(sk);
585 parent->sk_state = BT_CLOSED;
586 sock_set_flag(parent, SOCK_ZAPPED);
589 /* Kill socket (only if zapped and orphan)
590 * Must be called on unlocked socket.
591 */
592 static void l2cap_sock_kill(struct sock *sk)
594 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
595 return;
597 BT_DBG("sk %p state %d", sk, sk->sk_state);
599 /* Kill poor orphan */
600 bt_sock_unlink(&l2cap_sk_list, sk);
601 sock_set_flag(sk, SOCK_DEAD);
602 sock_put(sk);
605 static void __l2cap_sock_close(struct sock *sk, int reason)
607 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
609 switch (sk->sk_state) {
610 case BT_LISTEN:
611 l2cap_sock_cleanup_listen(sk);
612 break;
614 case BT_CONNECTED:
615 case BT_CONFIG:
616 case BT_CONNECT2:
617 if (sk->sk_type == SOCK_SEQPACKET) {
618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
619 struct l2cap_disconn_req req;
621 sk->sk_state = BT_DISCONN;
622 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
624 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
625 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
626 l2cap_send_cmd(conn, l2cap_get_ident(conn),
627 L2CAP_DISCONN_REQ, sizeof(req), &req);
628 } else
629 l2cap_chan_del(sk, reason);
630 break;
632 case BT_CONNECT:
633 case BT_DISCONN:
634 l2cap_chan_del(sk, reason);
635 break;
637 default:
638 sock_set_flag(sk, SOCK_ZAPPED);
639 break;
643 /* Must be called on unlocked socket. */
644 static void l2cap_sock_close(struct sock *sk)
646 l2cap_sock_clear_timer(sk);
647 lock_sock(sk);
648 __l2cap_sock_close(sk, ECONNRESET);
649 release_sock(sk);
650 l2cap_sock_kill(sk);
653 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
655 struct l2cap_pinfo *pi = l2cap_pi(sk);
657 BT_DBG("sk %p", sk);
659 if (parent) {
660 sk->sk_type = parent->sk_type;
661 pi->imtu = l2cap_pi(parent)->imtu;
662 pi->omtu = l2cap_pi(parent)->omtu;
663 pi->link_mode = l2cap_pi(parent)->link_mode;
664 } else {
665 pi->imtu = L2CAP_DEFAULT_MTU;
666 pi->omtu = 0;
667 pi->link_mode = 0;
670 /* Default config options */
671 pi->conf_len = 0;
672 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
675 static struct proto l2cap_proto = {
676 .name = "L2CAP",
677 .owner = THIS_MODULE,
678 .obj_size = sizeof(struct l2cap_pinfo)
681 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
683 struct sock *sk;
685 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
686 if (!sk)
687 return NULL;
689 sock_init_data(sock, sk);
690 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
692 sk->sk_destruct = l2cap_sock_destruct;
693 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
695 sock_reset_flag(sk, SOCK_ZAPPED);
697 sk->sk_protocol = proto;
698 sk->sk_state = BT_OPEN;
700 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
702 bt_sock_link(&l2cap_sk_list, sk);
703 return sk;
706 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
708 struct sock *sk;
710 BT_DBG("sock %p", sock);
712 sock->state = SS_UNCONNECTED;
714 if (sock->type != SOCK_SEQPACKET &&
715 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
716 return -ESOCKTNOSUPPORT;
718 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
719 return -EPERM;
721 sock->ops = &l2cap_sock_ops;
723 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
724 if (!sk)
725 return -ENOMEM;
727 l2cap_sock_init(sk, NULL);
728 return 0;
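/* PSMs below 0x1001 are reserved for well-known services (e.g. SDP 0x0001,
 * RFCOMM 0x0003), so binding to them requires CAP_NET_BIND_SERVICE. Valid
 * PSMs have an odd least significant octet, which is why the autobind loop
 * in l2cap_sock_listen() steps by two from 0x1001. */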
731 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
733 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
734 struct sock *sk = sock->sk;
735 int err = 0;
737 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
739 if (!addr || addr->sa_family != AF_BLUETOOTH)
740 return -EINVAL;
742 lock_sock(sk);
744 if (sk->sk_state != BT_OPEN) {
745 err = -EBADFD;
746 goto done;
749 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
750 !capable(CAP_NET_BIND_SERVICE)) {
751 err = -EACCES;
752 goto done;
755 write_lock_bh(&l2cap_sk_list.lock);
757 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
758 err = -EADDRINUSE;
759 } else {
760 /* Save source address */
761 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
762 l2cap_pi(sk)->psm = la->l2_psm;
763 l2cap_pi(sk)->sport = la->l2_psm;
764 sk->sk_state = BT_BOUND;
767 write_unlock_bh(&l2cap_sk_list.lock);
769 done:
770 release_sock(sk);
771 return err;
774 static int l2cap_do_connect(struct sock *sk)
776 bdaddr_t *src = &bt_sk(sk)->src;
777 bdaddr_t *dst = &bt_sk(sk)->dst;
778 struct l2cap_conn *conn;
779 struct hci_conn *hcon;
780 struct hci_dev *hdev;
781 int err = 0;
783 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
785 if (!(hdev = hci_get_route(dst, src)))
786 return -EHOSTUNREACH;
788 hci_dev_lock_bh(hdev);
790 err = -ENOMEM;
792 hcon = hci_connect(hdev, ACL_LINK, dst);
793 if (!hcon)
794 goto done;
796 conn = l2cap_conn_add(hcon, 0);
797 if (!conn) {
798 hci_conn_put(hcon);
799 goto done;
802 err = 0;
804 /* Update source addr of the socket */
805 bacpy(src, conn->src);
807 l2cap_chan_add(conn, sk, NULL);
809 sk->sk_state = BT_CONNECT;
810 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
812 if (hcon->state == BT_CONNECTED) {
813 if (sk->sk_type != SOCK_SEQPACKET) {
814 l2cap_sock_clear_timer(sk);
815 sk->sk_state = BT_CONNECTED;
816 } else
817 l2cap_do_start(sk);
820 done:
821 hci_dev_unlock_bh(hdev);
822 hci_dev_put(hdev);
823 return err;
826 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
828 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
829 struct sock *sk = sock->sk;
830 int err = 0;
832 lock_sock(sk);
834 BT_DBG("sk %p", sk);
836 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
837 err = -EINVAL;
838 goto done;
841 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
842 err = -EINVAL;
843 goto done;
846 switch (sk->sk_state) {
847 case BT_CONNECT:
848 case BT_CONNECT2:
849 case BT_CONFIG:
850 /* Already connecting */
851 goto wait;
853 case BT_CONNECTED:
854 /* Already connected */
855 goto done;
857 case BT_OPEN:
858 case BT_BOUND:
859 /* Can connect */
860 break;
862 default:
863 err = -EBADFD;
864 goto done;
867 /* Set destination address and psm */
868 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
869 l2cap_pi(sk)->psm = la->l2_psm;
871 if ((err = l2cap_do_connect(sk)))
872 goto done;
874 wait:
875 err = bt_sock_wait_state(sk, BT_CONNECTED,
876 sock_sndtimeo(sk, flags & O_NONBLOCK));
877 done:
878 release_sock(sk);
879 return err;
882 static int l2cap_sock_listen(struct socket *sock, int backlog)
884 struct sock *sk = sock->sk;
885 int err = 0;
887 BT_DBG("sk %p backlog %d", sk, backlog);
889 lock_sock(sk);
891 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
892 err = -EBADFD;
893 goto done;
896 if (!l2cap_pi(sk)->psm) {
897 bdaddr_t *src = &bt_sk(sk)->src;
898 u16 psm;
900 err = -EINVAL;
902 write_lock_bh(&l2cap_sk_list.lock);
904 for (psm = 0x1001; psm < 0x1100; psm += 2)
905 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
906 l2cap_pi(sk)->psm = htobs(psm);
907 l2cap_pi(sk)->sport = htobs(psm);
908 err = 0;
909 break;
912 write_unlock_bh(&l2cap_sk_list.lock);
914 if (err < 0)
915 goto done;
918 sk->sk_max_ack_backlog = backlog;
919 sk->sk_ack_backlog = 0;
920 sk->sk_state = BT_LISTEN;
922 done:
923 release_sock(sk);
924 return err;
927 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
929 DECLARE_WAITQUEUE(wait, current);
930 struct sock *sk = sock->sk, *nsk;
931 long timeo;
932 int err = 0;
934 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
936 if (sk->sk_state != BT_LISTEN) {
937 err = -EBADFD;
938 goto done;
941 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
943 BT_DBG("sk %p timeo %ld", sk, timeo);
945 /* Wait for an incoming connection. (wake-one). */
946 add_wait_queue_exclusive(sk->sk_sleep, &wait);
947 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
948 set_current_state(TASK_INTERRUPTIBLE);
949 if (!timeo) {
950 err = -EAGAIN;
951 break;
954 release_sock(sk);
955 timeo = schedule_timeout(timeo);
956 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
958 if (sk->sk_state != BT_LISTEN) {
959 err = -EBADFD;
960 break;
963 if (signal_pending(current)) {
964 err = sock_intr_errno(timeo);
965 break;
968 set_current_state(TASK_RUNNING);
969 remove_wait_queue(sk->sk_sleep, &wait);
971 if (err)
972 goto done;
974 newsock->state = SS_CONNECTED;
976 BT_DBG("new socket %p", nsk);
978 done:
979 release_sock(sk);
980 return err;
983 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
985 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
986 struct sock *sk = sock->sk;
988 BT_DBG("sock %p, sk %p", sock, sk);
990 addr->sa_family = AF_BLUETOOTH;
991 *len = sizeof(struct sockaddr_l2);
993 if (peer)
994 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
995 else
996 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
998 la->l2_psm = l2cap_pi(sk)->psm;
999 return 0;
1002 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1004 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1005 struct sk_buff *skb, **frag;
1006 int err, hlen, count, sent=0;
1007 struct l2cap_hdr *lh;
1009 BT_DBG("sk %p len %d", sk, len);
1011 /* First fragment (with L2CAP header) */
1012 if (sk->sk_type == SOCK_DGRAM)
1013 hlen = L2CAP_HDR_SIZE + 2;
1014 else
1015 hlen = L2CAP_HDR_SIZE;
1017 count = min_t(unsigned int, (conn->mtu - hlen), len);
1019 skb = bt_skb_send_alloc(sk, hlen + count,
1020 msg->msg_flags & MSG_DONTWAIT, &err);
1021 if (!skb)
1022 return err;
1024 /* Create L2CAP header */
1025 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1026 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1027 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1029 if (sk->sk_type == SOCK_DGRAM)
1030 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1032 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1033 err = -EFAULT;
1034 goto fail;
1037 sent += count;
1038 len -= count;
1040 /* Continuation fragments (no L2CAP header) */
1041 frag = &skb_shinfo(skb)->frag_list;
1042 while (len) {
1043 count = min_t(unsigned int, conn->mtu, len);
1045 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1046 if (!*frag)
1047 goto fail;
1049 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1050 err = -EFAULT;
1051 goto fail;
1054 sent += count;
1055 len -= count;
1057 frag = &(*frag)->next;
1060 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1061 goto fail;
1063 return sent;
1065 fail:
1066 kfree_skb(skb);
1067 return err;
1070 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1072 struct sock *sk = sock->sk;
1073 int err = 0;
1075 BT_DBG("sock %p, sk %p", sock, sk);
1077 err = sock_error(sk);
1078 if (err)
1079 return err;
1081 if (msg->msg_flags & MSG_OOB)
1082 return -EOPNOTSUPP;
1084 /* Check outgoing MTU */
1085 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1086 return -EINVAL;
1088 lock_sock(sk);
1090 if (sk->sk_state == BT_CONNECTED)
1091 err = l2cap_do_send(sk, msg, len);
1092 else
1093 err = -ENOTCONN;
1095 release_sock(sk);
1096 return err;
1099 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1101 struct sock *sk = sock->sk;
1102 struct l2cap_options opts;
1103 int err = 0, len;
1104 u32 opt;
1106 BT_DBG("sk %p", sk);
1108 lock_sock(sk);
1110 switch (optname) {
1111 case L2CAP_OPTIONS:
1112 opts.imtu = l2cap_pi(sk)->imtu;
1113 opts.omtu = l2cap_pi(sk)->omtu;
1114 opts.flush_to = l2cap_pi(sk)->flush_to;
1115 opts.mode = L2CAP_MODE_BASIC;
1117 len = min_t(unsigned int, sizeof(opts), optlen);
1118 if (copy_from_user((char *) &opts, optval, len)) {
1119 err = -EFAULT;
1120 break;
1123 l2cap_pi(sk)->imtu = opts.imtu;
1124 l2cap_pi(sk)->omtu = opts.omtu;
1125 break;
1127 case L2CAP_LM:
1128 if (get_user(opt, (u32 __user *) optval)) {
1129 err = -EFAULT;
1130 break;
1133 l2cap_pi(sk)->link_mode = opt;
1134 break;
1136 default:
1137 err = -ENOPROTOOPT;
1138 break;
1141 release_sock(sk);
1142 return err;
1145 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1147 struct sock *sk = sock->sk;
1148 struct l2cap_options opts;
1149 struct l2cap_conninfo cinfo;
1150 int len, err = 0;
1152 BT_DBG("sk %p", sk);
1154 if (get_user(len, optlen))
1155 return -EFAULT;
1157 lock_sock(sk);
1159 switch (optname) {
1160 case L2CAP_OPTIONS:
1161 opts.imtu = l2cap_pi(sk)->imtu;
1162 opts.omtu = l2cap_pi(sk)->omtu;
1163 opts.flush_to = l2cap_pi(sk)->flush_to;
1164 opts.mode = L2CAP_MODE_BASIC;
1166 len = min_t(unsigned int, len, sizeof(opts));
1167 if (copy_to_user(optval, (char *) &opts, len))
1168 err = -EFAULT;
1170 break;
1172 case L2CAP_LM:
1173 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1174 err = -EFAULT;
1175 break;
1177 case L2CAP_CONNINFO:
1178 if (sk->sk_state != BT_CONNECTED) {
1179 err = -ENOTCONN;
1180 break;
1183 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1184 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1186 len = min_t(unsigned int, len, sizeof(cinfo));
1187 if (copy_to_user(optval, (char *) &cinfo, len))
1188 err = -EFAULT;
1190 break;
1192 default:
1193 err = -ENOPROTOOPT;
1194 break;
1197 release_sock(sk);
1198 return err;
1201 static int l2cap_sock_shutdown(struct socket *sock, int how)
1203 struct sock *sk = sock->sk;
1204 int err = 0;
1206 BT_DBG("sock %p, sk %p", sock, sk);
1208 if (!sk)
1209 return 0;
1211 lock_sock(sk);
1212 if (!sk->sk_shutdown) {
1213 sk->sk_shutdown = SHUTDOWN_MASK;
1214 l2cap_sock_clear_timer(sk);
1215 __l2cap_sock_close(sk, 0);
1217 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1218 err = bt_sock_wait_state(sk, BT_CLOSED,
1219 sk->sk_lingertime);
1221 release_sock(sk);
1222 return err;
1225 static int l2cap_sock_release(struct socket *sock)
1227 struct sock *sk = sock->sk;
1228 int err;
1230 BT_DBG("sock %p, sk %p", sock, sk);
1232 if (!sk)
1233 return 0;
1235 err = l2cap_sock_shutdown(sock, 2);
1237 sock_orphan(sk);
1238 l2cap_sock_kill(sk);
1239 return err;
1242 static void l2cap_chan_ready(struct sock *sk)
1244 struct sock *parent = bt_sk(sk)->parent;
1246 BT_DBG("sk %p, parent %p", sk, parent);
1248 l2cap_pi(sk)->conf_state = 0;
1249 l2cap_sock_clear_timer(sk);
1251 if (!parent) {
1252 /* Outgoing channel.
1253 * Wake up socket sleeping on connect.
1254 */
1255 sk->sk_state = BT_CONNECTED;
1256 sk->sk_state_change(sk);
1257 } else {
1258 /* Incoming channel.
1259 * Wake up socket sleeping on accept.
1260 */
1261 parent->sk_data_ready(parent, 0);
1264 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1266 hci_conn_change_link_key(conn->hcon);
1270 /* Copy frame to all raw sockets on that connection */
1271 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1273 struct l2cap_chan_list *l = &conn->chan_list;
1274 struct sk_buff *nskb;
1275 struct sock *sk;
1277 BT_DBG("conn %p", conn);
1279 read_lock(&l->lock);
1280 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1281 if (sk->sk_type != SOCK_RAW)
1282 continue;
1284 /* Don't send frame to the socket it came from */
1285 if (skb->sk == sk)
1286 continue;
1288 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1289 continue;
1291 if (sock_queue_rcv_skb(sk, nskb))
1292 kfree_skb(nskb);
1294 read_unlock(&l->lock);
1297 /* ---- L2CAP signalling commands ---- */
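/* Signalling commands are carried on CID 0x0001 as a command header
 * (code, ident, length) followed by command data. l2cap_build_cmd()
 * builds such a frame and splits anything larger than the ACL MTU into
 * continuation fragments chained on the skb frag_list. */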
1298 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1299 u8 code, u8 ident, u16 dlen, void *data)
1301 struct sk_buff *skb, **frag;
1302 struct l2cap_cmd_hdr *cmd;
1303 struct l2cap_hdr *lh;
1304 int len, count;
1306 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1308 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1309 count = min_t(unsigned int, conn->mtu, len);
1311 skb = bt_skb_alloc(count, GFP_ATOMIC);
1312 if (!skb)
1313 return NULL;
1315 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1316 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1317 lh->cid = cpu_to_le16(0x0001);
1319 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1320 cmd->code = code;
1321 cmd->ident = ident;
1322 cmd->len = cpu_to_le16(dlen);
1324 if (dlen) {
1325 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1326 memcpy(skb_put(skb, count), data, count);
1327 data += count;
1330 len -= skb->len;
1332 /* Continuation fragments (no L2CAP header) */
1333 frag = &skb_shinfo(skb)->frag_list;
1334 while (len) {
1335 count = min_t(unsigned int, conn->mtu, len);
1337 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1338 if (!*frag)
1339 goto fail;
1341 memcpy(skb_put(*frag, count), data, count);
1343 len -= count;
1344 data += count;
1346 frag = &(*frag)->next;
1349 return skb;
1351 fail:
1352 kfree_skb(skb);
1353 return NULL;
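/* Configuration options are encoded as type/length/value triplets. The
 * most significant bit of the type marks the option as a hint that may be
 * ignored if not understood; l2cap_parse_conf_req() masks it out below. */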
1356 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1358 struct l2cap_conf_opt *opt = *ptr;
1359 int len;
1361 len = L2CAP_CONF_OPT_SIZE + opt->len;
1362 *ptr += len;
1364 *type = opt->type;
1365 *olen = opt->len;
1367 switch (opt->len) {
1368 case 1:
1369 *val = *((u8 *) opt->val);
1370 break;
1372 case 2:
1373 *val = __le16_to_cpu(*((__le16 *) opt->val));
1374 break;
1376 case 4:
1377 *val = __le32_to_cpu(*((__le32 *) opt->val));
1378 break;
1380 default:
1381 *val = (unsigned long) opt->val;
1382 break;
1385 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1386 return len;
1389 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1391 struct l2cap_conf_opt *opt = *ptr;
1393 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1395 opt->type = type;
1396 opt->len = len;
1398 switch (len) {
1399 case 1:
1400 *((u8 *) opt->val) = val;
1401 break;
1403 case 2:
1404 *((__le16 *) opt->val) = cpu_to_le16(val);
1405 break;
1407 case 4:
1408 *((__le32 *) opt->val) = cpu_to_le32(val);
1409 break;
1411 default:
1412 memcpy(opt->val, (void *) val, len);
1413 break;
1416 *ptr += L2CAP_CONF_OPT_SIZE + len;
1419 static int l2cap_build_conf_req(struct sock *sk, void *data)
1421 struct l2cap_pinfo *pi = l2cap_pi(sk);
1422 struct l2cap_conf_req *req = data;
1423 void *ptr = req->data;
1425 BT_DBG("sk %p", sk);
1427 if (pi->imtu != L2CAP_DEFAULT_MTU)
1428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1430 /* FIXME: Need actual value of the flush timeout */
1431 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1432 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1434 req->dcid = cpu_to_le16(pi->dcid);
1435 req->flags = cpu_to_le16(0);
1437 return ptr - data;
1440 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1442 struct l2cap_pinfo *pi = l2cap_pi(sk);
1443 struct l2cap_conf_rsp *rsp = data;
1444 void *ptr = rsp->data;
1445 void *req = pi->conf_req;
1446 int len = pi->conf_len;
1447 int type, hint, olen;
1448 unsigned long val;
1449 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1450 u16 mtu = L2CAP_DEFAULT_MTU;
1451 u16 result = L2CAP_CONF_SUCCESS;
1453 BT_DBG("sk %p", sk);
1455 while (len >= L2CAP_CONF_OPT_SIZE) {
1456 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1458 hint = type & 0x80;
1459 type &= 0x7f;
1461 switch (type) {
1462 case L2CAP_CONF_MTU:
1463 mtu = val;
1464 break;
1466 case L2CAP_CONF_FLUSH_TO:
1467 pi->flush_to = val;
1468 break;
1470 case L2CAP_CONF_QOS:
1471 break;
1473 case L2CAP_CONF_RFC:
1474 if (olen == sizeof(rfc))
1475 memcpy(&rfc, (void *) val, olen);
1476 break;
1478 default:
1479 if (hint)
1480 break;
1482 result = L2CAP_CONF_UNKNOWN;
1483 *((u8 *) ptr++) = type;
1484 break;
1488 if (result == L2CAP_CONF_SUCCESS) {
1489 /* Configure output options and let the other side know
1490 * which ones we don't like. */
1492 if (rfc.mode == L2CAP_MODE_BASIC) {
1493 if (mtu < pi->omtu)
1494 result = L2CAP_CONF_UNACCEPT;
1495 else {
1496 pi->omtu = mtu;
1497 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1500 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1501 } else {
1502 result = L2CAP_CONF_UNACCEPT;
1504 memset(&rfc, 0, sizeof(rfc));
1505 rfc.mode = L2CAP_MODE_BASIC;
1507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1508 sizeof(rfc), (unsigned long) &rfc);
1512 rsp->scid = cpu_to_le16(pi->dcid);
1513 rsp->result = cpu_to_le16(result);
1514 rsp->flags = cpu_to_le16(0x0000);
1516 return ptr - data;
1519 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1521 struct l2cap_conf_rsp *rsp = data;
1522 void *ptr = rsp->data;
1524 BT_DBG("sk %p", sk);
1526 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1527 rsp->result = cpu_to_le16(result);
1528 rsp->flags = cpu_to_le16(flags);
1530 return ptr - data;
1533 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1535 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1537 if (rej->reason != 0x0000)
1538 return 0;
1540 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1541 cmd->ident == conn->info_ident) {
1542 conn->info_ident = 0;
1543 del_timer(&conn->info_timer);
1544 l2cap_conn_start(conn);
1547 return 0;
1550 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1552 struct l2cap_chan_list *list = &conn->chan_list;
1553 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1554 struct l2cap_conn_rsp rsp;
1555 struct sock *sk, *parent;
1556 int result, status = 0;
1558 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1559 __le16 psm = req->psm;
1561 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1563 /* Check if we have a socket listening on this psm */
1564 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1565 if (!parent) {
1566 result = L2CAP_CR_BAD_PSM;
1567 goto sendresp;
1570 result = L2CAP_CR_NO_MEM;
1572 /* Check for backlog size */
1573 if (sk_acceptq_is_full(parent)) {
1574 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1575 goto response;
1578 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1579 if (!sk)
1580 goto response;
1582 write_lock_bh(&list->lock);
1584 /* Check if we already have a channel with that dcid */
1585 if (__l2cap_get_chan_by_dcid(list, scid)) {
1586 write_unlock_bh(&list->lock);
1587 sock_set_flag(sk, SOCK_ZAPPED);
1588 l2cap_sock_kill(sk);
1589 goto response;
1592 hci_conn_hold(conn->hcon);
1594 l2cap_sock_init(sk, parent);
1595 bacpy(&bt_sk(sk)->src, conn->src);
1596 bacpy(&bt_sk(sk)->dst, conn->dst);
1597 l2cap_pi(sk)->psm = psm;
1598 l2cap_pi(sk)->dcid = scid;
1600 __l2cap_chan_add(conn, sk, parent);
1601 dcid = l2cap_pi(sk)->scid;
1603 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1605 l2cap_pi(sk)->ident = cmd->ident;
1607 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1608 if (l2cap_check_link_mode(sk)) {
1609 sk->sk_state = BT_CONFIG;
1610 result = L2CAP_CR_SUCCESS;
1611 status = L2CAP_CS_NO_INFO;
1612 } else {
1613 sk->sk_state = BT_CONNECT2;
1614 result = L2CAP_CR_PEND;
1615 status = L2CAP_CS_AUTHEN_PEND;
1617 } else {
1618 sk->sk_state = BT_CONNECT2;
1619 result = L2CAP_CR_PEND;
1620 status = L2CAP_CS_NO_INFO;
1623 write_unlock_bh(&list->lock);
1625 response:
1626 bh_unlock_sock(parent);
1628 sendresp:
1629 rsp.scid = cpu_to_le16(scid);
1630 rsp.dcid = cpu_to_le16(dcid);
1631 rsp.result = cpu_to_le16(result);
1632 rsp.status = cpu_to_le16(status);
1633 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1635 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1636 struct l2cap_info_req info;
1637 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1639 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1640 conn->info_ident = l2cap_get_ident(conn);
1642 mod_timer(&conn->info_timer, jiffies +
1643 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1645 l2cap_send_cmd(conn, conn->info_ident,
1646 L2CAP_INFO_REQ, sizeof(info), &info);
1649 return 0;
1652 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1654 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1655 u16 scid, dcid, result, status;
1656 struct sock *sk;
1657 u8 req[128];
1659 scid = __le16_to_cpu(rsp->scid);
1660 dcid = __le16_to_cpu(rsp->dcid);
1661 result = __le16_to_cpu(rsp->result);
1662 status = __le16_to_cpu(rsp->status);
1664 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1666 if (scid) {
1667 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1668 return 0;
1669 } else {
1670 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1671 return 0;
1674 switch (result) {
1675 case L2CAP_CR_SUCCESS:
1676 sk->sk_state = BT_CONFIG;
1677 l2cap_pi(sk)->ident = 0;
1678 l2cap_pi(sk)->dcid = dcid;
1679 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1681 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1682 l2cap_build_conf_req(sk, req), req);
1683 break;
1685 case L2CAP_CR_PEND:
1686 break;
1688 default:
1689 l2cap_chan_del(sk, ECONNREFUSED);
1690 break;
1693 bh_unlock_sock(sk);
1694 return 0;
1697 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1699 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1700 u16 dcid, flags;
1701 u8 rsp[64];
1702 struct sock *sk;
1703 int len;
1705 dcid = __le16_to_cpu(req->dcid);
1706 flags = __le16_to_cpu(req->flags);
1708 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1710 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1711 return -ENOENT;
1713 if (sk->sk_state == BT_DISCONN)
1714 goto unlock;
1716 /* Reject if config buffer is too small. */
1717 len = cmd_len - sizeof(*req);
1718 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1719 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1720 l2cap_build_conf_rsp(sk, rsp,
1721 L2CAP_CONF_REJECT, flags), rsp);
1722 goto unlock;
1725 /* Store config. */
1726 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1727 l2cap_pi(sk)->conf_len += len;
1729 if (flags & 0x0001) {
1730 /* Incomplete config. Send empty response. */
1731 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1732 l2cap_build_conf_rsp(sk, rsp,
1733 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1734 goto unlock;
1737 /* Complete config. */
1738 len = l2cap_parse_conf_req(sk, rsp);
1739 if (len < 0)
1740 goto unlock;
1742 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1744 /* Reset config buffer. */
1745 l2cap_pi(sk)->conf_len = 0;
1747 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1748 goto unlock;
1750 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1751 sk->sk_state = BT_CONNECTED;
1752 l2cap_chan_ready(sk);
1753 goto unlock;
1756 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1757 u8 buf[64];
1758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1759 l2cap_build_conf_req(sk, buf), buf);
1762 unlock:
1763 bh_unlock_sock(sk);
1764 return 0;
1767 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1769 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1770 u16 scid, flags, result;
1771 struct sock *sk;
1773 scid = __le16_to_cpu(rsp->scid);
1774 flags = __le16_to_cpu(rsp->flags);
1775 result = __le16_to_cpu(rsp->result);
1777 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1779 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1780 return 0;
1782 switch (result) {
1783 case L2CAP_CONF_SUCCESS:
1784 break;
1786 case L2CAP_CONF_UNACCEPT:
1787 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1788 char req[128];
1789 /* It does not make sense to adjust L2CAP parameters
1790 * that are currently defined in the spec. We simply
1791 * resend the config request that we sent earlier. It is
1792 * stupid, but it helps qualification testing which
1793 * expects at least some response from us. */
1794 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1795 l2cap_build_conf_req(sk, req), req);
1796 goto done;
1799 default:
1800 sk->sk_state = BT_DISCONN;
1801 sk->sk_err = ECONNRESET;
1802 l2cap_sock_set_timer(sk, HZ * 5);
1804 struct l2cap_disconn_req req;
1805 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1806 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1807 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1808 L2CAP_DISCONN_REQ, sizeof(req), &req);
1810 goto done;
1813 if (flags & 0x01)
1814 goto done;
1816 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1818 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1819 sk->sk_state = BT_CONNECTED;
1820 l2cap_chan_ready(sk);
1823 done:
1824 bh_unlock_sock(sk);
1825 return 0;
1828 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1830 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1831 struct l2cap_disconn_rsp rsp;
1832 u16 dcid, scid;
1833 struct sock *sk;
1835 scid = __le16_to_cpu(req->scid);
1836 dcid = __le16_to_cpu(req->dcid);
1838 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1840 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1841 return 0;
1843 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1844 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1845 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1847 sk->sk_shutdown = SHUTDOWN_MASK;
1849 l2cap_chan_del(sk, ECONNRESET);
1850 bh_unlock_sock(sk);
1852 l2cap_sock_kill(sk);
1853 return 0;
1856 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1858 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1859 u16 dcid, scid;
1860 struct sock *sk;
1862 scid = __le16_to_cpu(rsp->scid);
1863 dcid = __le16_to_cpu(rsp->dcid);
1865 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1867 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1868 return 0;
1870 l2cap_chan_del(sk, 0);
1871 bh_unlock_sock(sk);
1873 l2cap_sock_kill(sk);
1874 return 0;
1877 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1879 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1880 u16 type;
1882 type = __le16_to_cpu(req->type);
1884 BT_DBG("type 0x%4.4x", type);
1886 if (type == L2CAP_IT_FEAT_MASK) {
1887 u8 buf[8];
1888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1889 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1890 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1891 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1892 l2cap_send_cmd(conn, cmd->ident,
1893 L2CAP_INFO_RSP, sizeof(buf), buf);
1894 } else {
1895 struct l2cap_info_rsp rsp;
1896 rsp.type = cpu_to_le16(type);
1897 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1898 l2cap_send_cmd(conn, cmd->ident,
1899 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1902 return 0;
1905 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1907 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1908 u16 type, result;
1910 type = __le16_to_cpu(rsp->type);
1911 result = __le16_to_cpu(rsp->result);
1913 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1915 conn->info_ident = 0;
1917 del_timer(&conn->info_timer);
1919 if (type == L2CAP_IT_FEAT_MASK)
1920 conn->feat_mask = get_unaligned_le32(rsp->data);
1922 l2cap_conn_start(conn);
1924 return 0;
1927 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1929 u8 *data = skb->data;
1930 int len = skb->len;
1931 struct l2cap_cmd_hdr cmd;
1932 int err = 0;
1934 l2cap_raw_recv(conn, skb);
1936 while (len >= L2CAP_CMD_HDR_SIZE) {
1937 u16 cmd_len;
1938 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1939 data += L2CAP_CMD_HDR_SIZE;
1940 len -= L2CAP_CMD_HDR_SIZE;
1942 cmd_len = le16_to_cpu(cmd.len);
1944 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1946 if (cmd_len > len || !cmd.ident) {
1947 BT_DBG("corrupted command");
1948 break;
1951 switch (cmd.code) {
1952 case L2CAP_COMMAND_REJ:
1953 l2cap_command_rej(conn, &cmd, data);
1954 break;
1956 case L2CAP_CONN_REQ:
1957 err = l2cap_connect_req(conn, &cmd, data);
1958 break;
1960 case L2CAP_CONN_RSP:
1961 err = l2cap_connect_rsp(conn, &cmd, data);
1962 break;
1964 case L2CAP_CONF_REQ:
1965 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1966 break;
1968 case L2CAP_CONF_RSP:
1969 err = l2cap_config_rsp(conn, &cmd, data);
1970 break;
1972 case L2CAP_DISCONN_REQ:
1973 err = l2cap_disconnect_req(conn, &cmd, data);
1974 break;
1976 case L2CAP_DISCONN_RSP:
1977 err = l2cap_disconnect_rsp(conn, &cmd, data);
1978 break;
1980 case L2CAP_ECHO_REQ:
1981 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1982 break;
1984 case L2CAP_ECHO_RSP:
1985 break;
1987 case L2CAP_INFO_REQ:
1988 err = l2cap_information_req(conn, &cmd, data);
1989 break;
1991 case L2CAP_INFO_RSP:
1992 err = l2cap_information_rsp(conn, &cmd, data);
1993 break;
1995 default:
1996 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1997 err = -EINVAL;
1998 break;
2001 if (err) {
2002 struct l2cap_cmd_rej rej;
2003 BT_DBG("error %d", err);
2005 /* FIXME: Map err to a valid reason */
2006 rej.reason = cpu_to_le16(0);
2007 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2010 data += cmd_len;
2011 len -= cmd_len;
2014 kfree_skb(skb);
2017 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2019 struct sock *sk;
2021 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2022 if (!sk) {
2023 BT_DBG("unknown cid 0x%4.4x", cid);
2024 goto drop;
2027 BT_DBG("sk %p, len %d", sk, skb->len);
2029 if (sk->sk_state != BT_CONNECTED)
2030 goto drop;
2032 if (l2cap_pi(sk)->imtu < skb->len)
2033 goto drop;
2035 /* If the socket receive buffer overflows we drop data here,
2036 * which is *bad* because L2CAP has to be reliable.
2037 * But we don't have any other choice: L2CAP doesn't
2038 * provide a flow control mechanism. */
2040 if (!sock_queue_rcv_skb(sk, skb))
2041 goto done;
2043 drop:
2044 kfree_skb(skb);
2046 done:
2047 if (sk)
2048 bh_unlock_sock(sk);
2050 return 0;
2053 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2055 struct sock *sk;
2057 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2058 if (!sk)
2059 goto drop;
2061 BT_DBG("sk %p, len %d", sk, skb->len);
2063 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2064 goto drop;
2066 if (l2cap_pi(sk)->imtu < skb->len)
2067 goto drop;
2069 if (!sock_queue_rcv_skb(sk, skb))
2070 goto done;
2072 drop:
2073 kfree_skb(skb);
2075 done:
2076 if (sk) bh_unlock_sock(sk);
2077 return 0;
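/* Demultiplex a reassembled L2CAP frame by CID: 0x0001 carries signalling
 * commands, 0x0002 carries connectionless (PSM-prefixed) data, and any
 * other CID is looked up as a connection-oriented data channel. */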
2080 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2082 struct l2cap_hdr *lh = (void *) skb->data;
2083 u16 cid, len;
2084 __le16 psm;
2086 skb_pull(skb, L2CAP_HDR_SIZE);
2087 cid = __le16_to_cpu(lh->cid);
2088 len = __le16_to_cpu(lh->len);
2090 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2092 switch (cid) {
2093 case 0x0001:
2094 l2cap_sig_channel(conn, skb);
2095 break;
2097 case 0x0002:
2098 psm = get_unaligned((__le16 *) skb->data);
2099 skb_pull(skb, 2);
2100 l2cap_conless_channel(conn, psm, skb);
2101 break;
2103 default:
2104 l2cap_data_channel(conn, cid, skb);
2105 break;
2109 /* ---- L2CAP interface with lower layer (HCI) ---- */
2111 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2113 int exact = 0, lm1 = 0, lm2 = 0;
2114 register struct sock *sk;
2115 struct hlist_node *node;
2117 if (type != ACL_LINK)
2118 return 0;
2120 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2122 /* Find listening sockets and check their link_mode */
2123 read_lock(&l2cap_sk_list.lock);
2124 sk_for_each(sk, node, &l2cap_sk_list.head) {
2125 if (sk->sk_state != BT_LISTEN)
2126 continue;
2128 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2129 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2130 exact++;
2131 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2132 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2134 read_unlock(&l2cap_sk_list.lock);
2136 return exact ? lm1 : lm2;
2139 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2141 struct l2cap_conn *conn;
2143 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2145 if (hcon->type != ACL_LINK)
2146 return 0;
2148 if (!status) {
2149 conn = l2cap_conn_add(hcon, status);
2150 if (conn)
2151 l2cap_conn_ready(conn);
2152 } else
2153 l2cap_conn_del(hcon, bt_err(status));
2155 return 0;
2158 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2160 BT_DBG("hcon %p reason %d", hcon, reason);
2162 if (hcon->type != ACL_LINK)
2163 return 0;
2165 l2cap_conn_del(hcon, bt_err(reason));
2167 return 0;
2170 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2172 struct l2cap_chan_list *l;
2173 struct l2cap_conn *conn = hcon->l2cap_data;
2174 struct sock *sk;
2176 if (!conn)
2177 return 0;
2179 l = &conn->chan_list;
2181 BT_DBG("conn %p", conn);
2183 read_lock(&l->lock);
2185 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2186 struct l2cap_pinfo *pi = l2cap_pi(sk);
2188 bh_lock_sock(sk);
2190 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2191 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2192 !status) {
2193 bh_unlock_sock(sk);
2194 continue;
2197 if (sk->sk_state == BT_CONNECT) {
2198 if (!status) {
2199 struct l2cap_conn_req req;
2200 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2201 req.psm = l2cap_pi(sk)->psm;
2203 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2205 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2206 L2CAP_CONN_REQ, sizeof(req), &req);
2207 } else {
2208 l2cap_sock_clear_timer(sk);
2209 l2cap_sock_set_timer(sk, HZ / 10);
2211 } else if (sk->sk_state == BT_CONNECT2) {
2212 struct l2cap_conn_rsp rsp;
2213 __u16 result;
2215 if (!status) {
2216 sk->sk_state = BT_CONFIG;
2217 result = L2CAP_CR_SUCCESS;
2218 } else {
2219 sk->sk_state = BT_DISCONN;
2220 l2cap_sock_set_timer(sk, HZ / 10);
2221 result = L2CAP_CR_SEC_BLOCK;
2224 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2225 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2226 rsp.result = cpu_to_le16(result);
2227 rsp.status = cpu_to_le16(0);
2228 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2229 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2232 bh_unlock_sock(sk);
2235 read_unlock(&l->lock);
2237 return 0;
2240 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2242 struct l2cap_chan_list *l;
2243 struct l2cap_conn *conn = hcon->l2cap_data;
2244 struct sock *sk;
2246 if (!conn)
2247 return 0;
2249 l = &conn->chan_list;
2251 BT_DBG("conn %p", conn);
2253 read_lock(&l->lock);
2255 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2256 struct l2cap_pinfo *pi = l2cap_pi(sk);
2258 bh_lock_sock(sk);
2260 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2261 (sk->sk_state == BT_CONNECTED ||
2262 sk->sk_state == BT_CONFIG) &&
2263 !status && encrypt == 0x00) {
2264 __l2cap_sock_close(sk, ECONNREFUSED);
2265 bh_unlock_sock(sk);
2266 continue;
2269 if (sk->sk_state == BT_CONNECT) {
2270 if (!status) {
2271 struct l2cap_conn_req req;
2272 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2273 req.psm = l2cap_pi(sk)->psm;
2275 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2277 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2278 L2CAP_CONN_REQ, sizeof(req), &req);
2279 } else {
2280 l2cap_sock_clear_timer(sk);
2281 l2cap_sock_set_timer(sk, HZ / 10);
2283 } else if (sk->sk_state == BT_CONNECT2) {
2284 struct l2cap_conn_rsp rsp;
2285 __u16 result;
2287 if (!status) {
2288 sk->sk_state = BT_CONFIG;
2289 result = L2CAP_CR_SUCCESS;
2290 } else {
2291 sk->sk_state = BT_DISCONN;
2292 l2cap_sock_set_timer(sk, HZ / 10);
2293 result = L2CAP_CR_SEC_BLOCK;
2296 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2297 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2298 rsp.result = cpu_to_le16(result);
2299 rsp.status = cpu_to_le16(0);
2300 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2301 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2304 bh_unlock_sock(sk);
2307 read_unlock(&l->lock);
2309 return 0;
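/* Reassemble L2CAP frames from ACL fragments: a start fragment carries the
 * L2CAP header with the total frame length, continuation fragments are
 * appended to conn->rx_skb until rx_len drops to zero and the complete
 * frame is handed to l2cap_recv_frame(). */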
2312 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2314 struct l2cap_conn *conn = hcon->l2cap_data;
2316 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2317 goto drop;
2319 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2321 if (flags & ACL_START) {
2322 struct l2cap_hdr *hdr;
2323 int len;
2325 if (conn->rx_len) {
2326 BT_ERR("Unexpected start frame (len %d)", skb->len);
2327 kfree_skb(conn->rx_skb);
2328 conn->rx_skb = NULL;
2329 conn->rx_len = 0;
2330 l2cap_conn_unreliable(conn, ECOMM);
2333 if (skb->len < 2) {
2334 BT_ERR("Frame is too short (len %d)", skb->len);
2335 l2cap_conn_unreliable(conn, ECOMM);
2336 goto drop;
2339 hdr = (struct l2cap_hdr *) skb->data;
2340 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2342 if (len == skb->len) {
2343 /* Complete frame received */
2344 l2cap_recv_frame(conn, skb);
2345 return 0;
2348 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2350 if (skb->len > len) {
2351 BT_ERR("Frame is too long (len %d, expected len %d)",
2352 skb->len, len);
2353 l2cap_conn_unreliable(conn, ECOMM);
2354 goto drop;
2357 /* Allocate skb for the complete frame (with header) */
2358 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2359 goto drop;
2361 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2362 skb->len);
2363 conn->rx_len = len - skb->len;
2364 } else {
2365 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2367 if (!conn->rx_len) {
2368 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2369 l2cap_conn_unreliable(conn, ECOMM);
2370 goto drop;
2373 if (skb->len > conn->rx_len) {
2374 BT_ERR("Fragment is too long (len %d, expected %d)",
2375 skb->len, conn->rx_len);
2376 kfree_skb(conn->rx_skb);
2377 conn->rx_skb = NULL;
2378 conn->rx_len = 0;
2379 l2cap_conn_unreliable(conn, ECOMM);
2380 goto drop;
2383 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2384 skb->len);
2385 conn->rx_len -= skb->len;
2387 if (!conn->rx_len) {
2388 /* Complete frame received */
2389 l2cap_recv_frame(conn, conn->rx_skb);
2390 conn->rx_skb = NULL;
2394 drop:
2395 kfree_skb(skb);
2396 return 0;
2399 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2401 struct sock *sk;
2402 struct hlist_node *node;
2403 char *str = buf;
2405 read_lock_bh(&l2cap_sk_list.lock);
2407 sk_for_each(sk, node, &l2cap_sk_list.head) {
2408 struct l2cap_pinfo *pi = l2cap_pi(sk);
2410 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2411 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2412 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2413 pi->imtu, pi->omtu, pi->link_mode);
2416 read_unlock_bh(&l2cap_sk_list.lock);
2418 return (str - buf);
2421 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2423 static const struct proto_ops l2cap_sock_ops = {
2424 .family = PF_BLUETOOTH,
2425 .owner = THIS_MODULE,
2426 .release = l2cap_sock_release,
2427 .bind = l2cap_sock_bind,
2428 .connect = l2cap_sock_connect,
2429 .listen = l2cap_sock_listen,
2430 .accept = l2cap_sock_accept,
2431 .getname = l2cap_sock_getname,
2432 .sendmsg = l2cap_sock_sendmsg,
2433 .recvmsg = bt_sock_recvmsg,
2434 .poll = bt_sock_poll,
2435 .ioctl = bt_sock_ioctl,
2436 .mmap = sock_no_mmap,
2437 .socketpair = sock_no_socketpair,
2438 .shutdown = l2cap_sock_shutdown,
2439 .setsockopt = l2cap_sock_setsockopt,
2440 .getsockopt = l2cap_sock_getsockopt
2443 static struct net_proto_family l2cap_sock_family_ops = {
2444 .family = PF_BLUETOOTH,
2445 .owner = THIS_MODULE,
2446 .create = l2cap_sock_create,
2449 static struct hci_proto l2cap_hci_proto = {
2450 .name = "L2CAP",
2451 .id = HCI_PROTO_L2CAP,
2452 .connect_ind = l2cap_connect_ind,
2453 .connect_cfm = l2cap_connect_cfm,
2454 .disconn_ind = l2cap_disconn_ind,
2455 .auth_cfm = l2cap_auth_cfm,
2456 .encrypt_cfm = l2cap_encrypt_cfm,
2457 .recv_acldata = l2cap_recv_acldata
2460 static int __init l2cap_init(void)
2462 int err;
2464 err = proto_register(&l2cap_proto, 0);
2465 if (err < 0)
2466 return err;
2468 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2469 if (err < 0) {
2470 BT_ERR("L2CAP socket registration failed");
2471 goto error;
2474 err = hci_register_proto(&l2cap_hci_proto);
2475 if (err < 0) {
2476 BT_ERR("L2CAP protocol registration failed");
2477 bt_sock_unregister(BTPROTO_L2CAP);
2478 goto error;
2481 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2482 BT_ERR("Failed to create L2CAP info file");
2484 BT_INFO("L2CAP ver %s", VERSION);
2485 BT_INFO("L2CAP socket layer initialized");
2487 return 0;
2489 error:
2490 proto_unregister(&l2cap_proto);
2491 return err;
2494 static void __exit l2cap_exit(void)
2496 class_remove_file(bt_class, &class_attr_l2cap);
2498 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2499 BT_ERR("L2CAP socket unregistration failed");
2501 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2502 BT_ERR("L2CAP protocol unregistration failed");
2504 proto_unregister(&l2cap_proto);
2507 void l2cap_load(void)
2509 /* Dummy function to trigger automatic L2CAP module loading by
2510 * other modules that use L2CAP sockets but don't use any other
2511 * symbols from it. */
2512 return;
2514 EXPORT_SYMBOL(l2cap_load);
2516 module_init(l2cap_init);
2517 module_exit(l2cap_exit);
2519 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2520 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2521 MODULE_VERSION(VERSION);
2522 MODULE_LICENSE("GPL");
2523 MODULE_ALIAS("bt-proto-0");