MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / net / bluetooth / l2cap.c
blob 1da6a95e23937c3df12ca566e73da7e88fe382ad
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/config.h>
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
44 #include <linux/list.h>
45 #include <net/sock.h>
47 #include <asm/system.h>
48 #include <asm/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 #include <net/bluetooth/l2cap.h>
55 #ifndef CONFIG_BT_L2CAP_DEBUG
56 #undef BT_DBG
57 #define BT_DBG(D...)
58 #endif
60 #define VERSION "2.4"
62 static struct proto_ops l2cap_sock_ops;
64 struct bt_sock_list l2cap_sk_list = {
65 .lock = RW_LOCK_UNLOCKED
68 static int l2cap_conn_del(struct hci_conn *conn, int err);
70 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
71 static void l2cap_chan_del(struct sock *sk, int err);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data);
78 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data);
80 /* ---- L2CAP timers ---- */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
85 BT_DBG("sock %p state %d", sk, sk->sk_state);
87 bh_lock_sock(sk);
88 __l2cap_sock_close(sk, ETIMEDOUT);
89 bh_unlock_sock(sk);
91 l2cap_sock_kill(sk);
92 sock_put(sk);
95 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
97 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
98 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
101 static void l2cap_sock_clear_timer(struct sock *sk)
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104 sk_stop_timer(sk, &sk->sk_timer);
107 static void l2cap_sock_init_timer(struct sock *sk)
109 init_timer(&sk->sk_timer);
110 sk->sk_timer.function = l2cap_sock_timeout;
111 sk->sk_timer.data = (unsigned long)sk;
114 /* ---- L2CAP connections ---- */
115 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
117 struct l2cap_conn *conn;
119 if ((conn = hcon->l2cap_data))
120 return conn;
122 if (status)
123 return conn;
125 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
126 return NULL;
127 memset(conn, 0, sizeof(struct l2cap_conn));
129 hcon->l2cap_data = conn;
130 conn->hcon = hcon;
132 conn->mtu = hcon->hdev->acl_mtu;
133 conn->src = &hcon->hdev->bdaddr;
134 conn->dst = &hcon->dst;
136 spin_lock_init(&conn->lock);
137 conn->chan_list.lock = RW_LOCK_UNLOCKED;
139 BT_DBG("hcon %p conn %p", hcon, conn);
140 return conn;
143 static int l2cap_conn_del(struct hci_conn *hcon, int err)
145 struct l2cap_conn *conn;
146 struct sock *sk;
148 if (!(conn = hcon->l2cap_data))
149 return 0;
151 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
153 if (conn->rx_skb)
154 kfree_skb(conn->rx_skb);
156 /* Kill channels */
157 while ((sk = conn->chan_list.head)) {
158 bh_lock_sock(sk);
159 l2cap_chan_del(sk, err);
160 bh_unlock_sock(sk);
161 l2cap_sock_kill(sk);
164 hcon->l2cap_data = NULL;
165 kfree(conn);
166 return 0;
169 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
171 struct l2cap_chan_list *l = &conn->chan_list;
172 write_lock(&l->lock);
173 __l2cap_chan_add(conn, sk, parent);
174 write_unlock(&l->lock);
177 /* ---- Socket interface ---- */
178 static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
180 struct sock *sk;
181 struct hlist_node *node;
182 sk_for_each(sk, node, &l2cap_sk_list.head)
183 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
184 goto found;
185 sk = NULL;
186 found:
187 return sk;
190 /* Find socket with psm and source bdaddr.
191  * Returns closest match. */
193 static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
195 struct sock *sk = NULL, *sk1 = NULL;
196 struct hlist_node *node;
198 sk_for_each(sk, node, &l2cap_sk_list.head) {
199 if (state && sk->sk_state != state)
200 continue;
202 if (l2cap_pi(sk)->psm == psm) {
203 /* Exact match. */
204 if (!bacmp(&bt_sk(sk)->src, src))
205 break;
207 /* Closest match */
208 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
209 sk1 = sk;
212 return node ? sk : sk1;
215 /* Find socket with given address (psm, src).
216 * Returns locked socket */
217 static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
219 struct sock *s;
220 read_lock(&l2cap_sk_list.lock);
221 s = __l2cap_get_sock_by_psm(state, psm, src);
222 if (s) bh_lock_sock(s);
223 read_unlock(&l2cap_sk_list.lock);
224 return s;
227 static void l2cap_sock_destruct(struct sock *sk)
229 BT_DBG("sk %p", sk);
231 skb_queue_purge(&sk->sk_receive_queue);
232 skb_queue_purge(&sk->sk_write_queue);
234 if (sk->sk_protinfo)
235 kfree(sk->sk_protinfo);
238 static void l2cap_sock_cleanup_listen(struct sock *parent)
240 struct sock *sk;
242 BT_DBG("parent %p", parent);
244 /* Close not yet accepted channels */
245 while ((sk = bt_accept_dequeue(parent, NULL)))
246 l2cap_sock_close(sk);
248 parent->sk_state = BT_CLOSED;
249 parent->sk_zapped = 1;
252 /* Kill socket (only if zapped and orphan)
253  * Must be called on unlocked socket. */
255 static void l2cap_sock_kill(struct sock *sk)
257 if (!sk->sk_zapped || sk->sk_socket)
258 return;
260 BT_DBG("sk %p state %d", sk, sk->sk_state);
262 /* Kill poor orphan */
263 bt_sock_unlink(&l2cap_sk_list, sk);
264 sock_set_flag(sk, SOCK_DEAD);
265 sock_put(sk);
268 static void __l2cap_sock_close(struct sock *sk, int reason)
270 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
272 switch (sk->sk_state) {
273 case BT_LISTEN:
274 l2cap_sock_cleanup_listen(sk);
275 break;
277 case BT_CONNECTED:
278 case BT_CONFIG:
279 case BT_CONNECT2:
280 if (sk->sk_type == SOCK_SEQPACKET) {
281 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
282 struct l2cap_disconn_req req;
284 sk->sk_state = BT_DISCONN;
285 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
287 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
288 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
289 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
290 } else {
291 l2cap_chan_del(sk, reason);
293 break;
295 case BT_CONNECT:
296 case BT_DISCONN:
297 l2cap_chan_del(sk, reason);
298 break;
300 default:
301 sk->sk_zapped = 1;
302 break;
306 /* Must be called on unlocked socket. */
307 static void l2cap_sock_close(struct sock *sk)
309 l2cap_sock_clear_timer(sk);
310 lock_sock(sk);
311 __l2cap_sock_close(sk, ECONNRESET);
312 release_sock(sk);
313 l2cap_sock_kill(sk);
316 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
318 struct l2cap_pinfo *pi = l2cap_pi(sk);
320 BT_DBG("sk %p", sk);
322 if (parent) {
323 sk->sk_type = parent->sk_type;
324 pi->imtu = l2cap_pi(parent)->imtu;
325 pi->omtu = l2cap_pi(parent)->omtu;
326 pi->link_mode = l2cap_pi(parent)->link_mode;
327 } else {
328 pi->imtu = L2CAP_DEFAULT_MTU;
329 pi->omtu = 0;
330 pi->link_mode = 0;
333 /* Default config options */
334 pi->conf_mtu = L2CAP_DEFAULT_MTU;
335 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
338 static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
340 struct sock *sk;
342 sk = bt_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
343 if (!sk)
344 return NULL;
346 sk_set_owner(sk, THIS_MODULE);
348 sk->sk_destruct = l2cap_sock_destruct;
349 sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
351 sk->sk_protocol = proto;
352 sk->sk_state = BT_OPEN;
354 l2cap_sock_init_timer(sk);
356 bt_sock_link(&l2cap_sk_list, sk);
357 return sk;
360 static int l2cap_sock_create(struct socket *sock, int protocol)
362 struct sock *sk;
364 BT_DBG("sock %p", sock);
366 sock->state = SS_UNCONNECTED;
368 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
369 return -ESOCKTNOSUPPORT;
371 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
372 return -EPERM;
374 sock->ops = &l2cap_sock_ops;
376 sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
377 if (!sk)
378 return -ENOMEM;
380 l2cap_sock_init(sk, NULL);
381 return 0;
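/*
 * Illustrative userspace usage (a sketch, not part of this kernel code,
 * assuming the userspace BlueZ <bluetooth/bluetooth.h> and
 * <bluetooth/l2cap.h> headers): the socket types accepted above are
 * reached through the ordinary socket API, e.g.
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);                  // example PSM
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);    // example address
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * which ends up in l2cap_sock_create() above and l2cap_sock_connect() below.
 */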
384 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
386 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
387 struct sock *sk = sock->sk;
388 int err = 0;
390 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
392 if (!addr || addr->sa_family != AF_BLUETOOTH)
393 return -EINVAL;
395 lock_sock(sk);
397 if (sk->sk_state != BT_OPEN) {
398 err = -EBADFD;
399 goto done;
402 write_lock_bh(&l2cap_sk_list.lock);
404 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
405 err = -EADDRINUSE;
406 } else {
407 /* Save source address */
408 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
409 l2cap_pi(sk)->psm = la->l2_psm;
410 l2cap_pi(sk)->sport = la->l2_psm;
411 sk->sk_state = BT_BOUND;
414 write_unlock_bh(&l2cap_sk_list.lock);
416 done:
417 release_sock(sk);
418 return err;
421 static int l2cap_do_connect(struct sock *sk)
423 bdaddr_t *src = &bt_sk(sk)->src;
424 bdaddr_t *dst = &bt_sk(sk)->dst;
425 struct l2cap_conn *conn;
426 struct hci_conn *hcon;
427 struct hci_dev *hdev;
428 int err = 0;
430 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
432 if (!(hdev = hci_get_route(dst, src)))
433 return -EHOSTUNREACH;
435 hci_dev_lock_bh(hdev);
437 err = -ENOMEM;
439 hcon = hci_connect(hdev, ACL_LINK, dst);
440 if (!hcon)
441 goto done;
443 conn = l2cap_conn_add(hcon, 0);
444 if (!conn) {
445 hci_conn_put(hcon);
446 goto done;
449 err = 0;
451 /* Update source addr of the socket */
452 bacpy(src, conn->src);
454 l2cap_chan_add(conn, sk, NULL);
456 sk->sk_state = BT_CONNECT;
457 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
459 if (hcon->state == BT_CONNECTED) {
460 if (sk->sk_type == SOCK_SEQPACKET) {
461 struct l2cap_conn_req req;
462 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
463 req.psm = l2cap_pi(sk)->psm;
464 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
465 } else {
466 l2cap_sock_clear_timer(sk);
467 sk->sk_state = BT_CONNECTED;
471 done:
472 hci_dev_unlock_bh(hdev);
473 hci_dev_put(hdev);
474 return err;
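/*
 * Connection setup sketch (as implemented above): hci_get_route() picks a
 * local adapter for the destination, hci_connect() brings up (or reuses)
 * the ACL link, l2cap_conn_add() attaches the L2CAP layer to it and the
 * channel is linked in.  If the ACL link is already up, the Connect
 * Request is sent immediately; otherwise it is deferred until
 * l2cap_conn_ready() runs.
 */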
477 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
479 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
480 struct sock *sk = sock->sk;
481 int err = 0;
483 lock_sock(sk);
485 BT_DBG("sk %p", sk);
487 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
488 err = -EINVAL;
489 goto done;
492 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
493 err = -EINVAL;
494 goto done;
497 switch(sk->sk_state) {
498 case BT_CONNECT:
499 case BT_CONNECT2:
500 case BT_CONFIG:
501 /* Already connecting */
502 goto wait;
504 case BT_CONNECTED:
505 /* Already connected */
506 goto done;
508 case BT_OPEN:
509 case BT_BOUND:
510 /* Can connect */
511 break;
513 default:
514 err = -EBADFD;
515 goto done;
518 /* Set destination address and psm */
519 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
520 l2cap_pi(sk)->psm = la->l2_psm;
522 if ((err = l2cap_do_connect(sk)))
523 goto done;
525 wait:
526 err = bt_sock_wait_state(sk, BT_CONNECTED,
527 sock_sndtimeo(sk, flags & O_NONBLOCK));
528 done:
529 release_sock(sk);
530 return err;
533 static int l2cap_sock_listen(struct socket *sock, int backlog)
535 struct sock *sk = sock->sk;
536 int err = 0;
538 BT_DBG("sk %p backlog %d", sk, backlog);
540 lock_sock(sk);
542 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
543 err = -EBADFD;
544 goto done;
547 if (!l2cap_pi(sk)->psm) {
548 bdaddr_t *src = &bt_sk(sk)->src;
549 u16 psm;
551 err = -EINVAL;
553 write_lock_bh(&l2cap_sk_list.lock);
555 for (psm = 0x1001; psm < 0x1100; psm += 2)
556 if (!__l2cap_get_sock_by_addr(psm, src)) {
557 l2cap_pi(sk)->psm = htobs(psm);
558 l2cap_pi(sk)->sport = htobs(psm);
559 err = 0;
560 break;
563 write_unlock_bh(&l2cap_sk_list.lock);
565 if (err < 0)
566 goto done;
569 sk->sk_max_ack_backlog = backlog;
570 sk->sk_ack_backlog = 0;
571 sk->sk_state = BT_LISTEN;
573 done:
574 release_sock(sk);
575 return err;
578 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
580 DECLARE_WAITQUEUE(wait, current);
581 struct sock *sk = sock->sk, *nsk;
582 long timeo;
583 int err = 0;
585 lock_sock(sk);
587 if (sk->sk_state != BT_LISTEN) {
588 err = -EBADFD;
589 goto done;
592 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
594 BT_DBG("sk %p timeo %ld", sk, timeo);
596 /* Wait for an incoming connection. (wake-one). */
597 add_wait_queue_exclusive(sk->sk_sleep, &wait);
598 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
599 set_current_state(TASK_INTERRUPTIBLE);
600 if (!timeo) {
601 err = -EAGAIN;
602 break;
605 release_sock(sk);
606 timeo = schedule_timeout(timeo);
607 lock_sock(sk);
609 if (sk->sk_state != BT_LISTEN) {
610 err = -EBADFD;
611 break;
614 if (signal_pending(current)) {
615 err = sock_intr_errno(timeo);
616 break;
619 set_current_state(TASK_RUNNING);
620 remove_wait_queue(sk->sk_sleep, &wait);
622 if (err)
623 goto done;
625 newsock->state = SS_CONNECTED;
627 BT_DBG("new socket %p", nsk);
629 done:
630 release_sock(sk);
631 return err;
634 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
636 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
637 struct sock *sk = sock->sk;
639 BT_DBG("sock %p, sk %p", sock, sk);
641 addr->sa_family = AF_BLUETOOTH;
642 *len = sizeof(struct sockaddr_l2);
644 if (peer)
645 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
646 else
647 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
649 la->l2_psm = l2cap_pi(sk)->psm;
650 return 0;
653 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
655 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
656 struct sk_buff *skb, **frag;
657 int err, hlen, count, sent=0;
658 struct l2cap_hdr *lh;
660 BT_DBG("sk %p len %d", sk, len);
662 /* First fragment (with L2CAP header) */
663 if (sk->sk_type == SOCK_DGRAM)
664 hlen = L2CAP_HDR_SIZE + 2;
665 else
666 hlen = L2CAP_HDR_SIZE;
668 count = min_t(unsigned int, (conn->mtu - hlen), len);
670 skb = bt_skb_send_alloc(sk, hlen + count,
671 msg->msg_flags & MSG_DONTWAIT, &err);
672 if (!skb)
673 return err;
675 /* Create L2CAP header */
676 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
677 lh->cid = __cpu_to_le16(l2cap_pi(sk)->dcid);
678 lh->len = __cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
680 if (sk->sk_type == SOCK_DGRAM)
681 put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
683 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
684 err = -EFAULT;
685 goto fail;
688 sent += count;
689 len -= count;
691 /* Continuation fragments (no L2CAP header) */
692 frag = &skb_shinfo(skb)->frag_list;
693 while (len) {
694 count = min_t(unsigned int, conn->mtu, len);
696 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
697 if (!*frag)
698 goto fail;
700 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
701 err = -EFAULT;
702 goto fail;
705 sent += count;
706 len -= count;
708 frag = &(*frag)->next;
711 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
712 goto fail;
714 return sent;
716 fail:
717 kfree_skb(skb);
718 return err;
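/*
 * Resulting skb layout for an outgoing frame (sketch): the head skb holds
 * the L2CAP header (and, for SOCK_DGRAM, the 2-byte PSM) plus as much
 * payload as fits in one ACL MTU; the remainder is chained as plain
 * payload fragments on skb_shinfo(skb)->frag_list:
 *
 *	[ L2CAP hdr | (psm) | payload... ] -> [ payload... ] -> ...
 */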
721 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
723 struct sock *sk = sock->sk;
724 int err = 0;
726 BT_DBG("sock %p, sk %p", sock, sk);
728 if (sk->sk_err)
729 return sock_error(sk);
731 if (msg->msg_flags & MSG_OOB)
732 return -EOPNOTSUPP;
734 /* Check outgoing MTU */
735 if (len > l2cap_pi(sk)->omtu)
736 return -EINVAL;
738 lock_sock(sk);
740 if (sk->sk_state == BT_CONNECTED)
741 err = l2cap_do_send(sk, msg, len);
742 else
743 err = -ENOTCONN;
745 release_sock(sk);
746 return err;
749 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
751 struct sock *sk = sock->sk;
752 struct l2cap_options opts;
753 int err = 0, len;
754 u32 opt;
756 BT_DBG("sk %p", sk);
758 lock_sock(sk);
760 switch (optname) {
761 case L2CAP_OPTIONS:
762 len = min_t(unsigned int, sizeof(opts), optlen);
763 if (copy_from_user((char *)&opts, optval, len)) {
764 err = -EFAULT;
765 break;
767 l2cap_pi(sk)->imtu = opts.imtu;
768 l2cap_pi(sk)->omtu = opts.omtu;
769 break;
771 case L2CAP_LM:
772 if (get_user(opt, (u32 __user *)optval)) {
773 err = -EFAULT;
774 break;
777 l2cap_pi(sk)->link_mode = opt;
778 break;
780 default:
781 err = -ENOPROTOOPT;
782 break;
785 release_sock(sk);
786 return err;
789 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
791 struct sock *sk = sock->sk;
792 struct l2cap_options opts;
793 struct l2cap_conninfo cinfo;
794 int len, err = 0;
796 if (get_user(len, optlen))
797 return -EFAULT;
799 lock_sock(sk);
801 switch (optname) {
802 case L2CAP_OPTIONS:
803 opts.imtu = l2cap_pi(sk)->imtu;
804 opts.omtu = l2cap_pi(sk)->omtu;
805 opts.flush_to = l2cap_pi(sk)->flush_to;
807 len = min_t(unsigned int, len, sizeof(opts));
808 if (copy_to_user(optval, (char *)&opts, len))
809 err = -EFAULT;
811 break;
813 case L2CAP_LM:
814 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *)optval))
815 err = -EFAULT;
816 break;
818 case L2CAP_CONNINFO:
819 if (sk->sk_state != BT_CONNECTED) {
820 err = -ENOTCONN;
821 break;
824 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
826 len = min_t(unsigned int, len, sizeof(cinfo));
827 if (copy_to_user(optval, (char *)&cinfo, len))
828 err = -EFAULT;
830 break;
832 default:
833 err = -ENOPROTOOPT;
834 break;
837 release_sock(sk);
838 return err;
841 static int l2cap_sock_shutdown(struct socket *sock, int how)
843 struct sock *sk = sock->sk;
844 int err = 0;
846 BT_DBG("sock %p, sk %p", sock, sk);
848 if (!sk)
849 return 0;
851 lock_sock(sk);
852 if (!sk->sk_shutdown) {
853 sk->sk_shutdown = SHUTDOWN_MASK;
854 l2cap_sock_clear_timer(sk);
855 __l2cap_sock_close(sk, 0);
857 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
858 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
860 release_sock(sk);
861 return err;
864 static int l2cap_sock_release(struct socket *sock)
866 struct sock *sk = sock->sk;
867 int err;
869 BT_DBG("sock %p, sk %p", sock, sk);
871 if (!sk)
872 return 0;
874 err = l2cap_sock_shutdown(sock, 2);
876 sock_orphan(sk);
877 l2cap_sock_kill(sk);
878 return err;
881 /* ---- L2CAP channels ---- */
882 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
884 struct sock *s;
885 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
886 if (l2cap_pi(s)->dcid == cid)
887 break;
889 return s;
892 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
894 struct sock *s;
895 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
896 if (l2cap_pi(s)->scid == cid)
897 break;
899 return s;
902 /* Find channel with given SCID.
903 * Returns locked socket */
904 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
906 struct sock *s;
907 read_lock(&l->lock);
908 s = __l2cap_get_chan_by_scid(l, cid);
909 if (s) bh_lock_sock(s);
910 read_unlock(&l->lock);
911 return s;
914 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
916 u16 cid = 0x0040;
918 for (; cid < 0xffff; cid++) {
919 if(!__l2cap_get_chan_by_scid(l, cid))
920 return cid;
923 return 0;
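/*
 * Channel identifier map used by this file (sketch): 0x0001 is the
 * signalling channel, 0x0002 the connectionless (SOCK_DGRAM) channel,
 * and 0x0040-0xfffe are allocated here for connection-oriented
 * (SOCK_SEQPACKET) channels; see __l2cap_chan_add() below.
 */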
926 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
928 sock_hold(sk);
930 if (l->head)
931 l2cap_pi(l->head)->prev_c = sk;
933 l2cap_pi(sk)->next_c = l->head;
934 l2cap_pi(sk)->prev_c = NULL;
935 l->head = sk;
938 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
940 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
942 write_lock(&l->lock);
943 if (sk == l->head)
944 l->head = next;
946 if (next)
947 l2cap_pi(next)->prev_c = prev;
948 if (prev)
949 l2cap_pi(prev)->next_c = next;
950 write_unlock(&l->lock);
952 __sock_put(sk);
955 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
957 struct l2cap_chan_list *l = &conn->chan_list;
959 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
961 l2cap_pi(sk)->conn = conn;
963 if (sk->sk_type == SOCK_SEQPACKET) {
964 /* Alloc CID for connection-oriented socket */
965 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
966 } else if (sk->sk_type == SOCK_DGRAM) {
967 /* Connectionless socket */
968 l2cap_pi(sk)->scid = 0x0002;
969 l2cap_pi(sk)->dcid = 0x0002;
970 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
971 } else {
972 /* Raw socket can send/recv signalling messages only */
973 l2cap_pi(sk)->scid = 0x0001;
974 l2cap_pi(sk)->dcid = 0x0001;
975 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
978 __l2cap_chan_link(l, sk);
980 if (parent)
981 bt_accept_enqueue(parent, sk);
984 /* Delete channel.
985 * Must be called on the locked socket. */
986 static void l2cap_chan_del(struct sock *sk, int err)
988 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
989 struct sock *parent = bt_sk(sk)->parent;
991 l2cap_sock_clear_timer(sk);
993 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
995 if (conn) {
996 /* Unlink from channel list */
997 l2cap_chan_unlink(&conn->chan_list, sk);
998 l2cap_pi(sk)->conn = NULL;
999 hci_conn_put(conn->hcon);
1002 sk->sk_state = BT_CLOSED;
1003 sk->sk_zapped = 1;
1005 if (err)
1006 sk->sk_err = err;
1008 if (parent)
1009 parent->sk_data_ready(parent, 0);
1010 else
1011 sk->sk_state_change(sk);
1014 static void l2cap_conn_ready(struct l2cap_conn *conn)
1016 struct l2cap_chan_list *l = &conn->chan_list;
1017 struct sock *sk;
1019 BT_DBG("conn %p", conn);
1021 read_lock(&l->lock);
1023 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1024 bh_lock_sock(sk);
1026 if (sk->sk_type != SOCK_SEQPACKET) {
1027 l2cap_sock_clear_timer(sk);
1028 sk->sk_state = BT_CONNECTED;
1029 sk->sk_state_change(sk);
1030 } else if (sk->sk_state == BT_CONNECT) {
1031 struct l2cap_conn_req req;
1032 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1033 req.psm = l2cap_pi(sk)->psm;
1034 l2cap_send_req(conn, L2CAP_CONN_REQ, sizeof(req), &req);
1037 bh_unlock_sock(sk);
1040 read_unlock(&l->lock);
1043 static void l2cap_chan_ready(struct sock *sk)
1045 struct sock *parent = bt_sk(sk)->parent;
1047 BT_DBG("sk %p, parent %p", sk, parent);
1049 l2cap_pi(sk)->conf_state = 0;
1050 l2cap_sock_clear_timer(sk);
1052 if (!parent) {
1053 /* Outgoing channel.
1054  * Wake up socket sleeping on connect. */
1056 sk->sk_state = BT_CONNECTED;
1057 sk->sk_state_change(sk);
1058 } else {
1059 /* Incoming channel.
1060  * Wake up socket sleeping on accept. */
1062 parent->sk_data_ready(parent, 0);
1066 /* Copy frame to all raw sockets on that connection */
1067 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1069 struct l2cap_chan_list *l = &conn->chan_list;
1070 struct sk_buff *nskb;
1071 struct sock * sk;
1073 BT_DBG("conn %p", conn);
1075 read_lock(&l->lock);
1076 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1077 if (sk->sk_type != SOCK_RAW)
1078 continue;
1080 /* Don't send frame to the socket it came from */
1081 if (skb->sk == sk)
1082 continue;
1084 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1085 continue;
1087 if (sock_queue_rcv_skb(sk, nskb))
1088 kfree_skb(nskb);
1090 read_unlock(&l->lock);
1093 /* ---- L2CAP signalling commands ---- */
1094 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
1096 u8 id;
1098 	/* Get next available identifier.
1099 	 * 1 - 199 are used by the kernel.
1100 	 * 200 - 254 are used by utilities like l2ping, etc. */
1103 spin_lock(&conn->lock);
1105 if (++conn->tx_ident > 199)
1106 conn->tx_ident = 1;
1108 id = conn->tx_ident;
1110 spin_unlock(&conn->lock);
1112 return id;
1115 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1116 u8 code, u8 ident, u16 dlen, void *data)
1118 struct sk_buff *skb, **frag;
1119 struct l2cap_cmd_hdr *cmd;
1120 struct l2cap_hdr *lh;
1121 int len, count;
1123 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1125 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1126 count = min_t(unsigned int, conn->mtu, len);
1128 skb = bt_skb_alloc(count, GFP_ATOMIC);
1129 if (!skb)
1130 return NULL;
1132 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1133 lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1134 lh->cid = __cpu_to_le16(0x0001);
1136 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1137 cmd->code = code;
1138 cmd->ident = ident;
1139 cmd->len = __cpu_to_le16(dlen);
1141 if (dlen) {
1142 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1143 memcpy(skb_put(skb, count), data, count);
1144 data += count;
1147 len -= skb->len;
1149 /* Continuation fragments (no L2CAP header) */
1150 frag = &skb_shinfo(skb)->frag_list;
1151 while (len) {
1152 count = min_t(unsigned int, conn->mtu, len);
1154 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1155 if (!*frag)
1156 goto fail;
1158 memcpy(skb_put(*frag, count), data, count);
1160 len -= count;
1161 data += count;
1163 frag = &(*frag)->next;
1166 return skb;
1168 fail:
1169 kfree_skb(skb);
1170 return NULL;
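/*
 * Wire format produced by l2cap_build_cmd() (sketch):
 *
 *	[ len | cid = 0x0001 ]   L2CAP header, len = command header + data
 *	[ code | ident | len ]   command header
 *	[ data ... ]             command-specific payload
 *
 * Payloads larger than one ACL MTU are continued in frag_list fragments,
 * exactly as in l2cap_do_send() above.
 */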
1173 static int l2cap_send_req(struct l2cap_conn *conn, u8 code, u16 len, void *data)
1175 u8 ident = l2cap_get_ident(conn);
1176 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1178 BT_DBG("code 0x%2.2x", code);
1180 if (!skb)
1181 return -ENOMEM;
1182 return hci_send_acl(conn->hcon, skb, 0);
1185 static int l2cap_send_rsp(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
1187 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
1189 BT_DBG("code 0x%2.2x", code);
1191 if (!skb)
1192 return -ENOMEM;
1193 return hci_send_acl(conn->hcon, skb, 0);
1196 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1198 struct l2cap_conf_opt *opt = *ptr;
1199 int len;
1201 len = L2CAP_CONF_OPT_SIZE + opt->len;
1202 *ptr += len;
1204 *type = opt->type;
1205 *olen = opt->len;
1207 switch (opt->len) {
1208 case 1:
1209 *val = *((u8 *) opt->val);
1210 break;
1212 case 2:
1213 *val = __le16_to_cpu(*((u16 *)opt->val));
1214 break;
1216 case 4:
1217 *val = __le32_to_cpu(*((u32 *)opt->val));
1218 break;
1220 default:
1221 *val = (unsigned long) opt->val;
1222 break;
1225 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1226 return len;
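/*
 * Configuration options are simple type/length/value triples.  As an
 * example (sketch), an MTU option carrying 672 (0x02A0) is encoded as:
 *
 *	type = 0x01 (L2CAP_CONF_MTU), len = 2, val = a0 02 (little endian)
 *
 * l2cap_get_conf_opt() above walks such triples; l2cap_add_conf_opt()
 * below builds them.
 */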
1229 static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
1231 int type, hint, olen;
1232 unsigned long val;
1233 void *ptr = data;
1235 BT_DBG("sk %p len %d", sk, len);
1237 while (len >= L2CAP_CONF_OPT_SIZE) {
1238 len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
1240 hint = type & 0x80;
1241 type &= 0x7f;
1243 switch (type) {
1244 case L2CAP_CONF_MTU:
1245 l2cap_pi(sk)->conf_mtu = val;
1246 break;
1248 case L2CAP_CONF_FLUSH_TO:
1249 l2cap_pi(sk)->flush_to = val;
1250 break;
1252 case L2CAP_CONF_QOS:
1253 break;
1255 default:
1256 if (hint)
1257 break;
1259 /* FIXME: Reject unknown option */
1260 break;
1265 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1267 struct l2cap_conf_opt *opt = *ptr;
1269 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1271 opt->type = type;
1272 opt->len = len;
1274 switch (len) {
1275 case 1:
1276 *((u8 *) opt->val) = val;
1277 break;
1279 case 2:
1280 *((u16 *) opt->val) = __cpu_to_le16(val);
1281 break;
1283 case 4:
1284 *((u32 *) opt->val) = __cpu_to_le32(val);
1285 break;
1287 default:
1288 memcpy(opt->val, (void *) val, len);
1289 break;
1292 *ptr += L2CAP_CONF_OPT_SIZE + len;
1295 static int l2cap_build_conf_req(struct sock *sk, void *data)
1297 struct l2cap_pinfo *pi = l2cap_pi(sk);
1298 struct l2cap_conf_req *req = data;
1299 void *ptr = req->data;
1301 BT_DBG("sk %p", sk);
1303 if (pi->imtu != L2CAP_DEFAULT_MTU)
1304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1306 /* FIXME. Need actual value of the flush timeout */
1307 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1308 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1310 req->dcid = __cpu_to_le16(pi->dcid);
1311 req->flags = __cpu_to_le16(0);
1313 return ptr - data;
1316 static inline int l2cap_conf_output(struct sock *sk, void **ptr)
1318 struct l2cap_pinfo *pi = l2cap_pi(sk);
1319 int result = 0;
1321 /* Configure output options and let the other side know
1322 * which ones we don't like. */
1323 if (pi->conf_mtu < pi->omtu) {
1324 l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1325 result = L2CAP_CONF_UNACCEPT;
1326 } else {
1327 pi->omtu = pi->conf_mtu;
1330 BT_DBG("sk %p result %d", sk, result);
1331 return result;
1334 static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
1336 struct l2cap_conf_rsp *rsp = data;
1337 void *ptr = rsp->data;
1338 u16 flags = 0;
1340 BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
1342 if (result)
1343 *result = l2cap_conf_output(sk, &ptr);
1344 else
1345 flags = 0x0001;
1347 rsp->scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1348 rsp->result = __cpu_to_le16(result ? *result : 0);
1349 rsp->flags = __cpu_to_le16(flags);
1351 return ptr - data;
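/*
 * Configuration handshake sketch: each side sends its own Configure
 * Request and answers the peer's.  A channel only becomes BT_CONNECTED
 * once both directions are done, tracked via L2CAP_CONF_OUTPUT_DONE
 * (our response accepted the peer's request) and L2CAP_CONF_INPUT_DONE
 * (the peer accepted ours) in l2cap_config_req()/l2cap_config_rsp() below.
 */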
1354 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1356 struct l2cap_chan_list *list = &conn->chan_list;
1357 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1358 struct l2cap_conn_rsp rsp;
1359 struct sock *sk, *parent;
1360 int result = 0, status = 0;
1362 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1363 u16 psm = req->psm;
1365 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1367 /* Check if we have socket listening on psm */
1368 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1369 if (!parent) {
1370 result = L2CAP_CR_BAD_PSM;
1371 goto sendresp;
1374 result = L2CAP_CR_NO_MEM;
1376 /* Check for backlog size */
1377 if (parent->sk_ack_backlog > parent->sk_max_ack_backlog) {
1378 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1379 goto response;
1382 sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1383 if (!sk)
1384 goto response;
1386 write_lock(&list->lock);
1388 /* Check if we already have channel with that dcid */
1389 if (__l2cap_get_chan_by_dcid(list, scid)) {
1390 write_unlock(&list->lock);
1391 sk->sk_zapped = 1;
1392 l2cap_sock_kill(sk);
1393 goto response;
1396 hci_conn_hold(conn->hcon);
1398 l2cap_sock_init(sk, parent);
1399 bacpy(&bt_sk(sk)->src, conn->src);
1400 bacpy(&bt_sk(sk)->dst, conn->dst);
1401 l2cap_pi(sk)->psm = psm;
1402 l2cap_pi(sk)->dcid = scid;
1404 __l2cap_chan_add(conn, sk, parent);
1405 dcid = l2cap_pi(sk)->scid;
1407 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1409 /* Service level security */
1410 result = L2CAP_CR_PEND;
1411 status = L2CAP_CS_AUTHEN_PEND;
1412 sk->sk_state = BT_CONNECT2;
1413 l2cap_pi(sk)->ident = cmd->ident;
1415 if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
1416 if (!hci_conn_encrypt(conn->hcon))
1417 goto done;
1418 } else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1419 if (!hci_conn_auth(conn->hcon))
1420 goto done;
1423 sk->sk_state = BT_CONFIG;
1424 result = status = 0;
1426 done:
1427 write_unlock(&list->lock);
1429 response:
1430 bh_unlock_sock(parent);
1432 sendresp:
1433 rsp.scid = __cpu_to_le16(scid);
1434 rsp.dcid = __cpu_to_le16(dcid);
1435 rsp.result = __cpu_to_le16(result);
1436 rsp.status = __cpu_to_le16(status);
1437 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1438 return 0;
1441 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1443 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1444 u16 scid, dcid, result, status;
1445 struct sock *sk;
1446 u8 req[128];
1448 scid = __le16_to_cpu(rsp->scid);
1449 dcid = __le16_to_cpu(rsp->dcid);
1450 result = __le16_to_cpu(rsp->result);
1451 status = __le16_to_cpu(rsp->status);
1453 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1455 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1456 return 0;
1458 switch (result) {
1459 case L2CAP_CR_SUCCESS:
1460 sk->sk_state = BT_CONFIG;
1461 l2cap_pi(sk)->dcid = dcid;
1462 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1464 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1465 break;
1467 case L2CAP_CR_PEND:
1468 break;
1470 default:
1471 l2cap_chan_del(sk, ECONNREFUSED);
1472 break;
1475 bh_unlock_sock(sk);
1476 return 0;
1479 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1481 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1482 u16 dcid, flags;
1483 u8 rsp[64];
1484 struct sock *sk;
1485 int result;
1487 dcid = __le16_to_cpu(req->dcid);
1488 flags = __le16_to_cpu(req->flags);
1490 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1492 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1493 return -ENOENT;
1495 l2cap_parse_conf_req(sk, req->data, cmd->len - sizeof(*req));
1497 if (flags & 0x0001) {
1498 /* Incomplete config. Send empty response. */
1499 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
1500 goto unlock;
1503 /* Complete config. */
1504 l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(sk, rsp, &result), rsp);
1506 if (result)
1507 goto unlock;
1509 /* Output config done */
1510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1512 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1513 sk->sk_state = BT_CONNECTED;
1514 l2cap_chan_ready(sk);
1515 } else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1516 u8 req[64];
1517 l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
1520 unlock:
1521 bh_unlock_sock(sk);
1522 return 0;
1525 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1527 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1528 u16 scid, flags, result;
1529 struct sock *sk;
1531 scid = __le16_to_cpu(rsp->scid);
1532 flags = __le16_to_cpu(rsp->flags);
1533 result = __le16_to_cpu(rsp->result);
1535 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1537 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1538 return 0;
1540 switch (result) {
1541 case L2CAP_CONF_SUCCESS:
1542 break;
1544 case L2CAP_CONF_UNACCEPT:
1545 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1546 char req[128];
1547 /* It does not make sense to adjust L2CAP parameters
1548 * that are currently defined in the spec. We simply
1549 * resend config request that we sent earlier. It is
1550 * stupid, but it helps qualification testing which
1551 * expects at least some response from us. */
1552 l2cap_send_req(conn, L2CAP_CONF_REQ,
1553 l2cap_build_conf_req(sk, req), req);
1554 goto done;
1557 default:
1558 sk->sk_state = BT_DISCONN;
1559 sk->sk_err = ECONNRESET;
1560 l2cap_sock_set_timer(sk, HZ * 5);
1562 struct l2cap_disconn_req req;
1563 req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1564 req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
1565 l2cap_send_req(conn, L2CAP_DISCONN_REQ, sizeof(req), &req);
1567 goto done;
1570 if (flags & 0x01)
1571 goto done;
1573 /* Input config done */
1574 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1576 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1577 sk->sk_state = BT_CONNECTED;
1578 l2cap_chan_ready(sk);
1581 done:
1582 bh_unlock_sock(sk);
1583 return 0;
1586 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1588 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1589 struct l2cap_disconn_rsp rsp;
1590 u16 dcid, scid;
1591 struct sock *sk;
1593 scid = __le16_to_cpu(req->scid);
1594 dcid = __le16_to_cpu(req->dcid);
1596 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1598 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1599 return 0;
1601 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1602 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1603 l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1605 sk->sk_shutdown = SHUTDOWN_MASK;
1607 l2cap_chan_del(sk, ECONNRESET);
1608 bh_unlock_sock(sk);
1610 l2cap_sock_kill(sk);
1611 return 0;
1614 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1616 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1617 u16 dcid, scid;
1618 struct sock *sk;
1620 scid = __le16_to_cpu(rsp->scid);
1621 dcid = __le16_to_cpu(rsp->dcid);
1623 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1625 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1626 return 0;
1628 l2cap_chan_del(sk, 0);
1629 bh_unlock_sock(sk);
1631 l2cap_sock_kill(sk);
1632 return 0;
1635 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1637 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1638 struct l2cap_info_rsp rsp;
1639 u16 type;
1641 type = __le16_to_cpu(req->type);
1643 BT_DBG("type 0x%4.4x", type);
1645 rsp.type = __cpu_to_le16(type);
1646 rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
1647 l2cap_send_rsp(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1649 return 0;
1652 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1654 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1655 u16 type, result;
1657 type = __le16_to_cpu(rsp->type);
1658 result = __le16_to_cpu(rsp->result);
1660 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1662 return 0;
1665 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1667 u8 *data = skb->data;
1668 int len = skb->len;
1669 struct l2cap_cmd_hdr cmd;
1670 int err = 0;
1672 l2cap_raw_recv(conn, skb);
1674 while (len >= L2CAP_CMD_HDR_SIZE) {
1675 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1676 data += L2CAP_CMD_HDR_SIZE;
1677 len -= L2CAP_CMD_HDR_SIZE;
1679 cmd.len = __le16_to_cpu(cmd.len);
1681 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
1683 if (cmd.len > len || !cmd.ident) {
1684 BT_DBG("corrupted command");
1685 break;
1688 switch (cmd.code) {
1689 case L2CAP_COMMAND_REJ:
1690 /* FIXME: We should process this */
1691 break;
1693 case L2CAP_CONN_REQ:
1694 err = l2cap_connect_req(conn, &cmd, data);
1695 break;
1697 case L2CAP_CONN_RSP:
1698 err = l2cap_connect_rsp(conn, &cmd, data);
1699 break;
1701 case L2CAP_CONF_REQ:
1702 err = l2cap_config_req(conn, &cmd, data);
1703 break;
1705 case L2CAP_CONF_RSP:
1706 err = l2cap_config_rsp(conn, &cmd, data);
1707 break;
1709 case L2CAP_DISCONN_REQ:
1710 err = l2cap_disconnect_req(conn, &cmd, data);
1711 break;
1713 case L2CAP_DISCONN_RSP:
1714 err = l2cap_disconnect_rsp(conn, &cmd, data);
1715 break;
1717 case L2CAP_ECHO_REQ:
1718 l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
1719 break;
1721 case L2CAP_ECHO_RSP:
1722 break;
1724 case L2CAP_INFO_REQ:
1725 err = l2cap_information_req(conn, &cmd, data);
1726 break;
1728 case L2CAP_INFO_RSP:
1729 err = l2cap_information_rsp(conn, &cmd, data);
1730 break;
1732 default:
1733 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1734 err = -EINVAL;
1735 break;
1738 if (err) {
1739 struct l2cap_cmd_rej rej;
1740 BT_DBG("error %d", err);
1742 /* FIXME: Map err to a valid reason */
1743 rej.reason = __cpu_to_le16(0);
1744 l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
1747 data += cmd.len;
1748 len -= cmd.len;
1751 kfree_skb(skb);
1754 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1756 struct sock *sk;
1758 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1759 if (!sk) {
1760 BT_DBG("unknown cid 0x%4.4x", cid);
1761 goto drop;
1764 BT_DBG("sk %p, len %d", sk, skb->len);
1766 if (sk->sk_state != BT_CONNECTED)
1767 goto drop;
1769 if (l2cap_pi(sk)->imtu < skb->len)
1770 goto drop;
1772 	/* If the socket receive buffer overflows we drop data here,
1773 	 * which is *bad* because L2CAP has to be reliable.
1774 	 * But we don't have any other choice: L2CAP doesn't
1775 	 * provide a flow control mechanism. */
1777 if (!sock_queue_rcv_skb(sk, skb))
1778 goto done;
1780 drop:
1781 kfree_skb(skb);
1783 done:
1784 if (sk) bh_unlock_sock(sk);
1785 return 0;
1788 static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
1790 struct sock *sk;
1792 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1793 if (!sk)
1794 goto drop;
1796 BT_DBG("sk %p, len %d", sk, skb->len);
1798 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1799 goto drop;
1801 if (l2cap_pi(sk)->imtu < skb->len)
1802 goto drop;
1804 if (!sock_queue_rcv_skb(sk, skb))
1805 goto done;
1807 drop:
1808 kfree_skb(skb);
1810 done:
1811 if (sk) bh_unlock_sock(sk);
1812 return 0;
1815 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1817 struct l2cap_hdr *lh = (void *) skb->data;
1818 u16 cid, psm, len;
1820 skb_pull(skb, L2CAP_HDR_SIZE);
1821 cid = __le16_to_cpu(lh->cid);
1822 len = __le16_to_cpu(lh->len);
1824 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1826 switch (cid) {
1827 case 0x0001:
1828 l2cap_sig_channel(conn, skb);
1829 break;
1831 case 0x0002:
1832 psm = get_unaligned((u16 *) skb->data);
1833 skb_pull(skb, 2);
1834 l2cap_conless_channel(conn, psm, skb);
1835 break;
1837 default:
1838 l2cap_data_channel(conn, cid, skb);
1839 break;
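/*
 * Incoming frame demultiplexing (as above): CID 0x0001 goes to the
 * signalling handler, CID 0x0002 is connectionless data whose payload
 * starts with a 2-byte PSM, and anything else is looked up as a
 * connection-oriented channel by its source CID.  A connectionless
 * frame therefore looks like (sketch):
 *
 *	[ len | cid = 0x0002 | psm | payload ... ]
 */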
1843 /* ---- L2CAP interface with lower layer (HCI) ---- */
1845 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1847 int exact = 0, lm1 = 0, lm2 = 0;
1848 register struct sock *sk;
1849 struct hlist_node *node;
1851 if (type != ACL_LINK)
1852 return 0;
1854 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
1856 /* Find listening sockets and check their link_mode */
1857 read_lock(&l2cap_sk_list.lock);
1858 sk_for_each(sk, node, &l2cap_sk_list.head) {
1859 if (sk->sk_state != BT_LISTEN)
1860 continue;
1862 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
1863 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1864 exact++;
1865 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1866 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
1868 read_unlock(&l2cap_sk_list.lock);
1870 return exact ? lm1 : lm2;
1873 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1875 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1877 if (hcon->type != ACL_LINK)
1878 return 0;
1880 if (!status) {
1881 struct l2cap_conn *conn;
1883 conn = l2cap_conn_add(hcon, status);
1884 if (conn)
1885 l2cap_conn_ready(conn);
1886 } else
1887 l2cap_conn_del(hcon, bt_err(status));
1889 return 0;
1892 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1894 BT_DBG("hcon %p reason %d", hcon, reason);
1896 if (hcon->type != ACL_LINK)
1897 return 0;
1899 l2cap_conn_del(hcon, bt_err(reason));
1900 return 0;
1903 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1905 struct l2cap_chan_list *l;
1906 struct l2cap_conn *conn;
1907 struct l2cap_conn_rsp rsp;
1908 struct sock *sk;
1909 int result;
1911 if (!(conn = hcon->l2cap_data))
1912 return 0;
1913 l = &conn->chan_list;
1915 BT_DBG("conn %p", conn);
1917 read_lock(&l->lock);
1919 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1920 bh_lock_sock(sk);
1922 if (sk->sk_state != BT_CONNECT2 ||
1923 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
1924 bh_unlock_sock(sk);
1925 continue;
1928 if (!status) {
1929 sk->sk_state = BT_CONFIG;
1930 result = 0;
1931 } else {
1932 sk->sk_state = BT_DISCONN;
1933 l2cap_sock_set_timer(sk, HZ/10);
1934 result = L2CAP_CR_SEC_BLOCK;
1937 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1938 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1939 rsp.result = __cpu_to_le16(result);
1940 rsp.status = __cpu_to_le16(0);
1941 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1943 bh_unlock_sock(sk);
1946 read_unlock(&l->lock);
1947 return 0;
1950 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
1952 struct l2cap_chan_list *l;
1953 struct l2cap_conn *conn;
1954 struct l2cap_conn_rsp rsp;
1955 struct sock *sk;
1956 int result;
1958 if (!(conn = hcon->l2cap_data))
1959 return 0;
1960 l = &conn->chan_list;
1962 BT_DBG("conn %p", conn);
1964 read_lock(&l->lock);
1966 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1967 bh_lock_sock(sk);
1969 if (sk->sk_state != BT_CONNECT2) {
1970 bh_unlock_sock(sk);
1971 continue;
1974 if (!status) {
1975 sk->sk_state = BT_CONFIG;
1976 result = 0;
1977 } else {
1978 sk->sk_state = BT_DISCONN;
1979 l2cap_sock_set_timer(sk, HZ/10);
1980 result = L2CAP_CR_SEC_BLOCK;
1983 rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
1984 rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
1985 rsp.result = __cpu_to_le16(result);
1986 rsp.status = __cpu_to_le16(0);
1987 l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1989 bh_unlock_sock(sk);
1992 read_unlock(&l->lock);
1993 return 0;
1996 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
1998 struct l2cap_conn *conn = hcon->l2cap_data;
2000 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2001 goto drop;
2003 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2005 if (flags & ACL_START) {
2006 struct l2cap_hdr *hdr;
2007 int len;
2009 if (conn->rx_len) {
2010 BT_ERR("Unexpected start frame (len %d)", skb->len);
2011 kfree_skb(conn->rx_skb);
2012 conn->rx_skb = NULL;
2013 conn->rx_len = 0;
2016 if (skb->len < 2) {
2017 BT_ERR("Frame is too short (len %d)", skb->len);
2018 goto drop;
2021 hdr = (struct l2cap_hdr *) skb->data;
2022 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2024 if (len == skb->len) {
2025 /* Complete frame received */
2026 l2cap_recv_frame(conn, skb);
2027 return 0;
2030 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2032 if (skb->len > len) {
2033 BT_ERR("Frame is too long (len %d, expected len %d)",
2034 skb->len, len);
2035 goto drop;
2038 /* Allocate skb for the complete frame (with header) */
2039 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2040 goto drop;
2042 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2043 conn->rx_len = len - skb->len;
2044 } else {
2045 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2047 if (!conn->rx_len) {
2048 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2049 goto drop;
2052 if (skb->len > conn->rx_len) {
2053 BT_ERR("Fragment is too long (len %d, expected %d)",
2054 skb->len, conn->rx_len);
2055 kfree_skb(conn->rx_skb);
2056 conn->rx_skb = NULL;
2057 conn->rx_len = 0;
2058 goto drop;
2061 memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
2062 conn->rx_len -= skb->len;
2064 if (!conn->rx_len) {
2065 /* Complete frame received */
2066 l2cap_recv_frame(conn, conn->rx_skb);
2067 conn->rx_skb = NULL;
2071 drop:
2072 kfree_skb(skb);
2073 return 0;
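/*
 * ACL reassembly sketch (as implemented above): an ACL_START fragment
 * carries the L2CAP header, so the total frame length is known up front;
 * conn->rx_skb is allocated to that size and continuation fragments are
 * copied in until conn->rx_len reaches zero, at which point the complete
 * frame is handed to l2cap_recv_frame().
 */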
2076 /* ---- Proc fs support ---- */
2077 #ifdef CONFIG_PROC_FS
2078 static void *l2cap_seq_start(struct seq_file *seq, loff_t *pos)
2080 struct sock *sk;
2081 struct hlist_node *node;
2082 loff_t l = *pos;
2084 read_lock_bh(&l2cap_sk_list.lock);
2086 sk_for_each(sk, node, &l2cap_sk_list.head)
2087 if (!l--)
2088 goto found;
2089 sk = NULL;
2090 found:
2091 return sk;
2094 static void *l2cap_seq_next(struct seq_file *seq, void *e, loff_t *pos)
2096 (*pos)++;
2097 return sk_next(e);
2100 static void l2cap_seq_stop(struct seq_file *seq, void *e)
2102 read_unlock_bh(&l2cap_sk_list.lock);
2105 static int l2cap_seq_show(struct seq_file *seq, void *e)
2107 struct sock *sk = e;
2108 struct l2cap_pinfo *pi = l2cap_pi(sk);
2110 seq_printf(seq, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2111 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2112 sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
2113 pi->omtu, pi->link_mode);
2114 return 0;
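/*
 * Each line of the "l2cap" proc entry (created in l2cap_proc_init()
 * below) has the form shown by the seq_printf() above, i.e. (sketch,
 * with purely illustrative values):
 *
 *	src dst state psm scid dcid imtu omtu link_mode
 *	00:11:22:33:44:55 00:AA:BB:CC:DD:EE 1 4103 0x0040 0x0041 672 672 0x0
 */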
2117 static struct seq_operations l2cap_seq_ops = {
2118 .start = l2cap_seq_start,
2119 .next = l2cap_seq_next,
2120 .stop = l2cap_seq_stop,
2121 .show = l2cap_seq_show
2124 static int l2cap_seq_open(struct inode *inode, struct file *file)
2126 return seq_open(file, &l2cap_seq_ops);
2129 static struct file_operations l2cap_seq_fops = {
2130 .owner = THIS_MODULE,
2131 .open = l2cap_seq_open,
2132 .read = seq_read,
2133 .llseek = seq_lseek,
2134 .release = seq_release,
2137 static int __init l2cap_proc_init(void)
2139 struct proc_dir_entry *p = create_proc_entry("l2cap", S_IRUGO, proc_bt);
2140 if (!p)
2141 return -ENOMEM;
2142 p->owner = THIS_MODULE;
2143 p->proc_fops = &l2cap_seq_fops;
2144 return 0;
2147 static void __exit l2cap_proc_cleanup(void)
2149 remove_proc_entry("l2cap", proc_bt);
2152 #else /* CONFIG_PROC_FS */
2154 static int __init l2cap_proc_init(void)
2156 return 0;
2159 static void __exit l2cap_proc_cleanup(void)
2161 return;
2163 #endif /* CONFIG_PROC_FS */
2165 static struct proto_ops l2cap_sock_ops = {
2166 .family = PF_BLUETOOTH,
2167 .owner = THIS_MODULE,
2168 .release = l2cap_sock_release,
2169 .bind = l2cap_sock_bind,
2170 .connect = l2cap_sock_connect,
2171 .listen = l2cap_sock_listen,
2172 .accept = l2cap_sock_accept,
2173 .getname = l2cap_sock_getname,
2174 .sendmsg = l2cap_sock_sendmsg,
2175 .recvmsg = bt_sock_recvmsg,
2176 .poll = bt_sock_poll,
2177 .mmap = sock_no_mmap,
2178 .socketpair = sock_no_socketpair,
2179 .ioctl = sock_no_ioctl,
2180 .shutdown = l2cap_sock_shutdown,
2181 .setsockopt = l2cap_sock_setsockopt,
2182 .getsockopt = l2cap_sock_getsockopt
2185 static struct net_proto_family l2cap_sock_family_ops = {
2186 .family = PF_BLUETOOTH,
2187 .owner = THIS_MODULE,
2188 .create = l2cap_sock_create,
2191 static struct hci_proto l2cap_hci_proto = {
2192 .name = "L2CAP",
2193 .id = HCI_PROTO_L2CAP,
2194 .connect_ind = l2cap_connect_ind,
2195 .connect_cfm = l2cap_connect_cfm,
2196 .disconn_ind = l2cap_disconn_ind,
2197 .auth_cfm = l2cap_auth_cfm,
2198 .encrypt_cfm = l2cap_encrypt_cfm,
2199 .recv_acldata = l2cap_recv_acldata
2202 static int __init l2cap_init(void)
2204 int err;
2206 if ((err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
2207 BT_ERR("L2CAP socket registration failed");
2208 return err;
2211 if ((err = hci_register_proto(&l2cap_hci_proto))) {
2212 BT_ERR("L2CAP protocol registration failed");
2213 return err;
2216 l2cap_proc_init();
2218 BT_INFO("L2CAP ver %s", VERSION);
2219 BT_INFO("L2CAP socket layer initialized");
2221 return 0;
2224 static void __exit l2cap_exit(void)
2226 l2cap_proc_cleanup();
2228 /* Unregister socket and protocol */
2229 if (bt_sock_unregister(BTPROTO_L2CAP))
2230 BT_ERR("L2CAP socket unregistration failed");
2232 if (hci_unregister_proto(&l2cap_hci_proto))
2233 BT_ERR("L2CAP protocol unregistration failed");
2236 void l2cap_load(void)
2238 /* Dummy function to trigger automatic L2CAP module loading by
2239 	 * other modules that use L2CAP sockets but do not use any other
2240 	 * symbols from it. */
2241 return;
2243 EXPORT_SYMBOL(l2cap_load);
2245 module_init(l2cap_init);
2246 module_exit(l2cap_exit);
2248 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2249 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2250 MODULE_VERSION(VERSION);
2251 MODULE_LICENSE("GPL");
2252 MODULE_ALIAS("bt-proto-0");