exit_notify: kill the wrong capable(CAP_KILL) check
[linux-2.6/mini2440.git] / net / bluetooth / l2cap.c
blobca4d3b40d5cea5ac310dafe77177499eab6e554a
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.13"
55 static u32 l2cap_feat_mask = 0x0080;
56 static u8 l2cap_fixed_chan[8] = { 0x02, };
58 static const struct proto_ops l2cap_sock_ops;
60 static struct bt_sock_list l2cap_sk_list = {
61 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
64 static void __l2cap_sock_close(struct sock *sk, int reason);
65 static void l2cap_sock_close(struct sock *sk);
66 static void l2cap_sock_kill(struct sock *sk);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
71 /* ---- L2CAP timers ---- */
72 static void l2cap_sock_timeout(unsigned long arg)
74 struct sock *sk = (struct sock *) arg;
75 int reason;
77 BT_DBG("sock %p state %d", sk, sk->sk_state);
79 bh_lock_sock(sk);
81 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
82 reason = ECONNREFUSED;
83 else if (sk->sk_state == BT_CONNECT &&
84 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
85 reason = ECONNREFUSED;
86 else
87 reason = ETIMEDOUT;
89 __l2cap_sock_close(sk, reason);
91 bh_unlock_sock(sk);
93 l2cap_sock_kill(sk);
94 sock_put(sk);
97 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
99 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
100 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
103 static void l2cap_sock_clear_timer(struct sock *sk)
105 BT_DBG("sock %p state %d", sk, sk->sk_state);
106 sk_stop_timer(sk, &sk->sk_timer);
109 /* ---- L2CAP channels ---- */
110 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->dcid == cid)
115 break;
117 return s;
120 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
122 struct sock *s;
123 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
124 if (l2cap_pi(s)->scid == cid)
125 break;
127 return s;
130 /* Find channel with given SCID.
131 * Returns locked socket */
132 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 struct sock *s;
135 read_lock(&l->lock);
136 s = __l2cap_get_chan_by_scid(l, cid);
137 if (s) bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
142 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
149 return s;
152 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s) bh_lock_sock(s);
158 read_unlock(&l->lock);
159 return s;
162 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 u16 cid = 0x0040;
166 for (; cid < 0xffff; cid++) {
167 if(!__l2cap_get_chan_by_scid(l, cid))
168 return cid;
171 return 0;
174 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
176 sock_hold(sk);
178 if (l->head)
179 l2cap_pi(l->head)->prev_c = sk;
181 l2cap_pi(sk)->next_c = l->head;
182 l2cap_pi(sk)->prev_c = NULL;
183 l->head = sk;
186 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
188 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
190 write_lock_bh(&l->lock);
191 if (sk == l->head)
192 l->head = next;
194 if (next)
195 l2cap_pi(next)->prev_c = prev;
196 if (prev)
197 l2cap_pi(prev)->next_c = next;
198 write_unlock_bh(&l->lock);
200 __sock_put(sk);
203 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
205 struct l2cap_chan_list *l = &conn->chan_list;
207 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
209 conn->disc_reason = 0x13;
211 l2cap_pi(sk)->conn = conn;
213 if (sk->sk_type == SOCK_SEQPACKET) {
214 /* Alloc CID for connection-oriented socket */
215 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
216 } else if (sk->sk_type == SOCK_DGRAM) {
217 /* Connectionless socket */
218 l2cap_pi(sk)->scid = 0x0002;
219 l2cap_pi(sk)->dcid = 0x0002;
220 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
221 } else {
222 /* Raw socket can send/recv signalling messages only */
223 l2cap_pi(sk)->scid = 0x0001;
224 l2cap_pi(sk)->dcid = 0x0001;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 __l2cap_chan_link(l, sk);
230 if (parent)
231 bt_accept_enqueue(parent, sk);
234 /* Delete channel.
235 * Must be called on the locked socket. */
236 static void l2cap_chan_del(struct sock *sk, int err)
238 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
239 struct sock *parent = bt_sk(sk)->parent;
241 l2cap_sock_clear_timer(sk);
243 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
245 if (conn) {
246 /* Unlink from channel list */
247 l2cap_chan_unlink(&conn->chan_list, sk);
248 l2cap_pi(sk)->conn = NULL;
249 hci_conn_put(conn->hcon);
252 sk->sk_state = BT_CLOSED;
253 sock_set_flag(sk, SOCK_ZAPPED);
255 if (err)
256 sk->sk_err = err;
258 if (parent) {
259 bt_accept_unlink(sk);
260 parent->sk_data_ready(parent, 0);
261 } else
262 sk->sk_state_change(sk);
265 /* Service level security */
266 static inline int l2cap_check_security(struct sock *sk)
268 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
269 __u8 auth_type;
271 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
273 auth_type = HCI_AT_NO_BONDING_MITM;
274 else
275 auth_type = HCI_AT_NO_BONDING;
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
278 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
279 } else {
280 switch (l2cap_pi(sk)->sec_level) {
281 case BT_SECURITY_HIGH:
282 auth_type = HCI_AT_GENERAL_BONDING_MITM;
283 break;
284 case BT_SECURITY_MEDIUM:
285 auth_type = HCI_AT_GENERAL_BONDING;
286 break;
287 default:
288 auth_type = HCI_AT_NO_BONDING;
289 break;
293 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
294 auth_type);
297 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
299 u8 id;
301 /* Get next available identificator.
302 * 1 - 128 are used by kernel.
303 * 129 - 199 are reserved.
304 * 200 - 254 are used by utilities like l2ping, etc.
307 spin_lock_bh(&conn->lock);
309 if (++conn->tx_ident > 128)
310 conn->tx_ident = 1;
312 id = conn->tx_ident;
314 spin_unlock_bh(&conn->lock);
316 return id;
319 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
321 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
323 BT_DBG("code 0x%2.2x", code);
325 if (!skb)
326 return -ENOMEM;
328 return hci_send_acl(conn->hcon, skb, 0);
331 static void l2cap_do_start(struct sock *sk)
333 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
335 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
336 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
337 return;
339 if (l2cap_check_security(sk)) {
340 struct l2cap_conn_req req;
341 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
342 req.psm = l2cap_pi(sk)->psm;
344 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
346 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
347 L2CAP_CONN_REQ, sizeof(req), &req);
349 } else {
350 struct l2cap_info_req req;
351 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
353 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
354 conn->info_ident = l2cap_get_ident(conn);
356 mod_timer(&conn->info_timer, jiffies +
357 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
359 l2cap_send_cmd(conn, conn->info_ident,
360 L2CAP_INFO_REQ, sizeof(req), &req);
364 /* ---- L2CAP connections ---- */
365 static void l2cap_conn_start(struct l2cap_conn *conn)
367 struct l2cap_chan_list *l = &conn->chan_list;
368 struct sock *sk;
370 BT_DBG("conn %p", conn);
372 read_lock(&l->lock);
374 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
375 bh_lock_sock(sk);
377 if (sk->sk_type != SOCK_SEQPACKET) {
378 bh_unlock_sock(sk);
379 continue;
382 if (sk->sk_state == BT_CONNECT) {
383 if (l2cap_check_security(sk)) {
384 struct l2cap_conn_req req;
385 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
386 req.psm = l2cap_pi(sk)->psm;
388 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
390 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
391 L2CAP_CONN_REQ, sizeof(req), &req);
393 } else if (sk->sk_state == BT_CONNECT2) {
394 struct l2cap_conn_rsp rsp;
395 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
396 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
398 if (l2cap_check_security(sk)) {
399 if (bt_sk(sk)->defer_setup) {
400 struct sock *parent = bt_sk(sk)->parent;
401 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
402 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
403 parent->sk_data_ready(parent, 0);
405 } else {
406 sk->sk_state = BT_CONFIG;
407 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
408 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
410 } else {
411 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
412 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
415 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
416 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
419 bh_unlock_sock(sk);
422 read_unlock(&l->lock);
425 static void l2cap_conn_ready(struct l2cap_conn *conn)
427 struct l2cap_chan_list *l = &conn->chan_list;
428 struct sock *sk;
430 BT_DBG("conn %p", conn);
432 read_lock(&l->lock);
434 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
435 bh_lock_sock(sk);
437 if (sk->sk_type != SOCK_SEQPACKET) {
438 l2cap_sock_clear_timer(sk);
439 sk->sk_state = BT_CONNECTED;
440 sk->sk_state_change(sk);
441 } else if (sk->sk_state == BT_CONNECT)
442 l2cap_do_start(sk);
444 bh_unlock_sock(sk);
447 read_unlock(&l->lock);
450 /* Notify sockets that we cannot guaranty reliability anymore */
451 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
453 struct l2cap_chan_list *l = &conn->chan_list;
454 struct sock *sk;
456 BT_DBG("conn %p", conn);
458 read_lock(&l->lock);
460 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
461 if (l2cap_pi(sk)->force_reliable)
462 sk->sk_err = err;
465 read_unlock(&l->lock);
468 static void l2cap_info_timeout(unsigned long arg)
470 struct l2cap_conn *conn = (void *) arg;
472 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
473 conn->info_ident = 0;
475 l2cap_conn_start(conn);
478 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
480 struct l2cap_conn *conn = hcon->l2cap_data;
482 if (conn || status)
483 return conn;
485 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
486 if (!conn)
487 return NULL;
489 hcon->l2cap_data = conn;
490 conn->hcon = hcon;
492 BT_DBG("hcon %p conn %p", hcon, conn);
494 conn->mtu = hcon->hdev->acl_mtu;
495 conn->src = &hcon->hdev->bdaddr;
496 conn->dst = &hcon->dst;
498 conn->feat_mask = 0;
500 setup_timer(&conn->info_timer, l2cap_info_timeout,
501 (unsigned long) conn);
503 spin_lock_init(&conn->lock);
504 rwlock_init(&conn->chan_list.lock);
506 conn->disc_reason = 0x13;
508 return conn;
511 static void l2cap_conn_del(struct hci_conn *hcon, int err)
513 struct l2cap_conn *conn = hcon->l2cap_data;
514 struct sock *sk;
516 if (!conn)
517 return;
519 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
521 kfree_skb(conn->rx_skb);
523 /* Kill channels */
524 while ((sk = conn->chan_list.head)) {
525 bh_lock_sock(sk);
526 l2cap_chan_del(sk, err);
527 bh_unlock_sock(sk);
528 l2cap_sock_kill(sk);
531 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
532 del_timer_sync(&conn->info_timer);
534 hcon->l2cap_data = NULL;
535 kfree(conn);
538 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
540 struct l2cap_chan_list *l = &conn->chan_list;
541 write_lock_bh(&l->lock);
542 __l2cap_chan_add(conn, sk, parent);
543 write_unlock_bh(&l->lock);
546 /* ---- Socket interface ---- */
547 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
549 struct sock *sk;
550 struct hlist_node *node;
551 sk_for_each(sk, node, &l2cap_sk_list.head)
552 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
553 goto found;
554 sk = NULL;
555 found:
556 return sk;
559 /* Find socket with psm and source bdaddr.
560 * Returns closest match.
562 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
564 struct sock *sk = NULL, *sk1 = NULL;
565 struct hlist_node *node;
567 sk_for_each(sk, node, &l2cap_sk_list.head) {
568 if (state && sk->sk_state != state)
569 continue;
571 if (l2cap_pi(sk)->psm == psm) {
572 /* Exact match. */
573 if (!bacmp(&bt_sk(sk)->src, src))
574 break;
576 /* Closest match */
577 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
578 sk1 = sk;
581 return node ? sk : sk1;
584 /* Find socket with given address (psm, src).
585 * Returns locked socket */
586 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
588 struct sock *s;
589 read_lock(&l2cap_sk_list.lock);
590 s = __l2cap_get_sock_by_psm(state, psm, src);
591 if (s) bh_lock_sock(s);
592 read_unlock(&l2cap_sk_list.lock);
593 return s;
596 static void l2cap_sock_destruct(struct sock *sk)
598 BT_DBG("sk %p", sk);
600 skb_queue_purge(&sk->sk_receive_queue);
601 skb_queue_purge(&sk->sk_write_queue);
604 static void l2cap_sock_cleanup_listen(struct sock *parent)
606 struct sock *sk;
608 BT_DBG("parent %p", parent);
610 /* Close not yet accepted channels */
611 while ((sk = bt_accept_dequeue(parent, NULL)))
612 l2cap_sock_close(sk);
614 parent->sk_state = BT_CLOSED;
615 sock_set_flag(parent, SOCK_ZAPPED);
618 /* Kill socket (only if zapped and orphan)
619 * Must be called on unlocked socket.
621 static void l2cap_sock_kill(struct sock *sk)
623 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
624 return;
626 BT_DBG("sk %p state %d", sk, sk->sk_state);
628 /* Kill poor orphan */
629 bt_sock_unlink(&l2cap_sk_list, sk);
630 sock_set_flag(sk, SOCK_DEAD);
631 sock_put(sk);
634 static void __l2cap_sock_close(struct sock *sk, int reason)
636 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
638 switch (sk->sk_state) {
639 case BT_LISTEN:
640 l2cap_sock_cleanup_listen(sk);
641 break;
643 case BT_CONNECTED:
644 case BT_CONFIG:
645 if (sk->sk_type == SOCK_SEQPACKET) {
646 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
647 struct l2cap_disconn_req req;
649 sk->sk_state = BT_DISCONN;
650 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
652 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
653 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
654 l2cap_send_cmd(conn, l2cap_get_ident(conn),
655 L2CAP_DISCONN_REQ, sizeof(req), &req);
656 } else
657 l2cap_chan_del(sk, reason);
658 break;
660 case BT_CONNECT2:
661 if (sk->sk_type == SOCK_SEQPACKET) {
662 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
663 struct l2cap_conn_rsp rsp;
664 __u16 result;
666 if (bt_sk(sk)->defer_setup)
667 result = L2CAP_CR_SEC_BLOCK;
668 else
669 result = L2CAP_CR_BAD_PSM;
671 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
672 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
673 rsp.result = cpu_to_le16(result);
674 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
675 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
676 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
677 } else
678 l2cap_chan_del(sk, reason);
679 break;
681 case BT_CONNECT:
682 case BT_DISCONN:
683 l2cap_chan_del(sk, reason);
684 break;
686 default:
687 sock_set_flag(sk, SOCK_ZAPPED);
688 break;
692 /* Must be called on unlocked socket. */
693 static void l2cap_sock_close(struct sock *sk)
695 l2cap_sock_clear_timer(sk);
696 lock_sock(sk);
697 __l2cap_sock_close(sk, ECONNRESET);
698 release_sock(sk);
699 l2cap_sock_kill(sk);
702 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
704 struct l2cap_pinfo *pi = l2cap_pi(sk);
706 BT_DBG("sk %p", sk);
708 if (parent) {
709 sk->sk_type = parent->sk_type;
710 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
712 pi->imtu = l2cap_pi(parent)->imtu;
713 pi->omtu = l2cap_pi(parent)->omtu;
714 pi->sec_level = l2cap_pi(parent)->sec_level;
715 pi->role_switch = l2cap_pi(parent)->role_switch;
716 pi->force_reliable = l2cap_pi(parent)->force_reliable;
717 } else {
718 pi->imtu = L2CAP_DEFAULT_MTU;
719 pi->omtu = 0;
720 pi->sec_level = BT_SECURITY_LOW;
721 pi->role_switch = 0;
722 pi->force_reliable = 0;
725 /* Default config options */
726 pi->conf_len = 0;
727 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
730 static struct proto l2cap_proto = {
731 .name = "L2CAP",
732 .owner = THIS_MODULE,
733 .obj_size = sizeof(struct l2cap_pinfo)
736 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
738 struct sock *sk;
740 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
741 if (!sk)
742 return NULL;
744 sock_init_data(sock, sk);
745 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
747 sk->sk_destruct = l2cap_sock_destruct;
748 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
750 sock_reset_flag(sk, SOCK_ZAPPED);
752 sk->sk_protocol = proto;
753 sk->sk_state = BT_OPEN;
755 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
757 bt_sock_link(&l2cap_sk_list, sk);
758 return sk;
761 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
763 struct sock *sk;
765 BT_DBG("sock %p", sock);
767 sock->state = SS_UNCONNECTED;
769 if (sock->type != SOCK_SEQPACKET &&
770 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
771 return -ESOCKTNOSUPPORT;
773 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
774 return -EPERM;
776 sock->ops = &l2cap_sock_ops;
778 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
779 if (!sk)
780 return -ENOMEM;
782 l2cap_sock_init(sk, NULL);
783 return 0;
786 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
788 struct sock *sk = sock->sk;
789 struct sockaddr_l2 la;
790 int len, err = 0;
792 BT_DBG("sk %p", sk);
794 if (!addr || addr->sa_family != AF_BLUETOOTH)
795 return -EINVAL;
797 memset(&la, 0, sizeof(la));
798 len = min_t(unsigned int, sizeof(la), alen);
799 memcpy(&la, addr, len);
801 if (la.l2_cid)
802 return -EINVAL;
804 lock_sock(sk);
806 if (sk->sk_state != BT_OPEN) {
807 err = -EBADFD;
808 goto done;
811 if (la.l2_psm && btohs(la.l2_psm) < 0x1001 &&
812 !capable(CAP_NET_BIND_SERVICE)) {
813 err = -EACCES;
814 goto done;
817 write_lock_bh(&l2cap_sk_list.lock);
819 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
820 err = -EADDRINUSE;
821 } else {
822 /* Save source address */
823 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
824 l2cap_pi(sk)->psm = la.l2_psm;
825 l2cap_pi(sk)->sport = la.l2_psm;
826 sk->sk_state = BT_BOUND;
828 if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003)
829 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
832 write_unlock_bh(&l2cap_sk_list.lock);
834 done:
835 release_sock(sk);
836 return err;
839 static int l2cap_do_connect(struct sock *sk)
841 bdaddr_t *src = &bt_sk(sk)->src;
842 bdaddr_t *dst = &bt_sk(sk)->dst;
843 struct l2cap_conn *conn;
844 struct hci_conn *hcon;
845 struct hci_dev *hdev;
846 __u8 auth_type;
847 int err = 0;
849 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
850 l2cap_pi(sk)->psm);
852 if (!(hdev = hci_get_route(dst, src)))
853 return -EHOSTUNREACH;
855 hci_dev_lock_bh(hdev);
857 err = -ENOMEM;
859 if (sk->sk_type == SOCK_RAW) {
860 switch (l2cap_pi(sk)->sec_level) {
861 case BT_SECURITY_HIGH:
862 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
863 break;
864 case BT_SECURITY_MEDIUM:
865 auth_type = HCI_AT_DEDICATED_BONDING;
866 break;
867 default:
868 auth_type = HCI_AT_NO_BONDING;
869 break;
871 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
872 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
873 auth_type = HCI_AT_NO_BONDING_MITM;
874 else
875 auth_type = HCI_AT_NO_BONDING;
877 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
878 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
879 } else {
880 switch (l2cap_pi(sk)->sec_level) {
881 case BT_SECURITY_HIGH:
882 auth_type = HCI_AT_GENERAL_BONDING_MITM;
883 break;
884 case BT_SECURITY_MEDIUM:
885 auth_type = HCI_AT_GENERAL_BONDING;
886 break;
887 default:
888 auth_type = HCI_AT_NO_BONDING;
889 break;
893 hcon = hci_connect(hdev, ACL_LINK, dst,
894 l2cap_pi(sk)->sec_level, auth_type);
895 if (!hcon)
896 goto done;
898 conn = l2cap_conn_add(hcon, 0);
899 if (!conn) {
900 hci_conn_put(hcon);
901 goto done;
904 err = 0;
906 /* Update source addr of the socket */
907 bacpy(src, conn->src);
909 l2cap_chan_add(conn, sk, NULL);
911 sk->sk_state = BT_CONNECT;
912 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
914 if (hcon->state == BT_CONNECTED) {
915 if (sk->sk_type != SOCK_SEQPACKET) {
916 l2cap_sock_clear_timer(sk);
917 sk->sk_state = BT_CONNECTED;
918 } else
919 l2cap_do_start(sk);
922 done:
923 hci_dev_unlock_bh(hdev);
924 hci_dev_put(hdev);
925 return err;
928 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
930 struct sock *sk = sock->sk;
931 struct sockaddr_l2 la;
932 int len, err = 0;
934 BT_DBG("sk %p", sk);
936 if (!addr || addr->sa_family != AF_BLUETOOTH)
937 return -EINVAL;
939 memset(&la, 0, sizeof(la));
940 len = min_t(unsigned int, sizeof(la), alen);
941 memcpy(&la, addr, len);
943 if (la.l2_cid)
944 return -EINVAL;
946 lock_sock(sk);
948 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
949 err = -EINVAL;
950 goto done;
953 switch(sk->sk_state) {
954 case BT_CONNECT:
955 case BT_CONNECT2:
956 case BT_CONFIG:
957 /* Already connecting */
958 goto wait;
960 case BT_CONNECTED:
961 /* Already connected */
962 goto done;
964 case BT_OPEN:
965 case BT_BOUND:
966 /* Can connect */
967 break;
969 default:
970 err = -EBADFD;
971 goto done;
974 /* Set destination address and psm */
975 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
976 l2cap_pi(sk)->psm = la.l2_psm;
978 if ((err = l2cap_do_connect(sk)))
979 goto done;
981 wait:
982 err = bt_sock_wait_state(sk, BT_CONNECTED,
983 sock_sndtimeo(sk, flags & O_NONBLOCK));
984 done:
985 release_sock(sk);
986 return err;
989 static int l2cap_sock_listen(struct socket *sock, int backlog)
991 struct sock *sk = sock->sk;
992 int err = 0;
994 BT_DBG("sk %p backlog %d", sk, backlog);
996 lock_sock(sk);
998 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
999 err = -EBADFD;
1000 goto done;
1003 if (!l2cap_pi(sk)->psm) {
1004 bdaddr_t *src = &bt_sk(sk)->src;
1005 u16 psm;
1007 err = -EINVAL;
1009 write_lock_bh(&l2cap_sk_list.lock);
1011 for (psm = 0x1001; psm < 0x1100; psm += 2)
1012 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
1013 l2cap_pi(sk)->psm = htobs(psm);
1014 l2cap_pi(sk)->sport = htobs(psm);
1015 err = 0;
1016 break;
1019 write_unlock_bh(&l2cap_sk_list.lock);
1021 if (err < 0)
1022 goto done;
1025 sk->sk_max_ack_backlog = backlog;
1026 sk->sk_ack_backlog = 0;
1027 sk->sk_state = BT_LISTEN;
1029 done:
1030 release_sock(sk);
1031 return err;
1034 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1036 DECLARE_WAITQUEUE(wait, current);
1037 struct sock *sk = sock->sk, *nsk;
1038 long timeo;
1039 int err = 0;
1041 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1043 if (sk->sk_state != BT_LISTEN) {
1044 err = -EBADFD;
1045 goto done;
1048 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1050 BT_DBG("sk %p timeo %ld", sk, timeo);
1052 /* Wait for an incoming connection. (wake-one). */
1053 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1054 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1055 set_current_state(TASK_INTERRUPTIBLE);
1056 if (!timeo) {
1057 err = -EAGAIN;
1058 break;
1061 release_sock(sk);
1062 timeo = schedule_timeout(timeo);
1063 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1065 if (sk->sk_state != BT_LISTEN) {
1066 err = -EBADFD;
1067 break;
1070 if (signal_pending(current)) {
1071 err = sock_intr_errno(timeo);
1072 break;
1075 set_current_state(TASK_RUNNING);
1076 remove_wait_queue(sk->sk_sleep, &wait);
1078 if (err)
1079 goto done;
1081 newsock->state = SS_CONNECTED;
1083 BT_DBG("new socket %p", nsk);
1085 done:
1086 release_sock(sk);
1087 return err;
1090 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1092 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1093 struct sock *sk = sock->sk;
1095 BT_DBG("sock %p, sk %p", sock, sk);
1097 addr->sa_family = AF_BLUETOOTH;
1098 *len = sizeof(struct sockaddr_l2);
1100 if (peer) {
1101 la->l2_psm = l2cap_pi(sk)->psm;
1102 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1103 la->l2_cid = htobs(l2cap_pi(sk)->dcid);
1104 } else {
1105 la->l2_psm = l2cap_pi(sk)->sport;
1106 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1107 la->l2_cid = htobs(l2cap_pi(sk)->scid);
1110 return 0;
1113 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1115 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1116 struct sk_buff *skb, **frag;
1117 int err, hlen, count, sent=0;
1118 struct l2cap_hdr *lh;
1120 BT_DBG("sk %p len %d", sk, len);
1122 /* First fragment (with L2CAP header) */
1123 if (sk->sk_type == SOCK_DGRAM)
1124 hlen = L2CAP_HDR_SIZE + 2;
1125 else
1126 hlen = L2CAP_HDR_SIZE;
1128 count = min_t(unsigned int, (conn->mtu - hlen), len);
1130 skb = bt_skb_send_alloc(sk, hlen + count,
1131 msg->msg_flags & MSG_DONTWAIT, &err);
1132 if (!skb)
1133 return err;
1135 /* Create L2CAP header */
1136 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1137 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1138 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1140 if (sk->sk_type == SOCK_DGRAM)
1141 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1143 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1144 err = -EFAULT;
1145 goto fail;
1148 sent += count;
1149 len -= count;
1151 /* Continuation fragments (no L2CAP header) */
1152 frag = &skb_shinfo(skb)->frag_list;
1153 while (len) {
1154 count = min_t(unsigned int, conn->mtu, len);
1156 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1157 if (!*frag)
1158 goto fail;
1160 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1161 err = -EFAULT;
1162 goto fail;
1165 sent += count;
1166 len -= count;
1168 frag = &(*frag)->next;
1171 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1172 goto fail;
1174 return sent;
1176 fail:
1177 kfree_skb(skb);
1178 return err;
1181 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1183 struct sock *sk = sock->sk;
1184 int err = 0;
1186 BT_DBG("sock %p, sk %p", sock, sk);
1188 err = sock_error(sk);
1189 if (err)
1190 return err;
1192 if (msg->msg_flags & MSG_OOB)
1193 return -EOPNOTSUPP;
1195 /* Check outgoing MTU */
1196 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1197 return -EINVAL;
1199 lock_sock(sk);
1201 if (sk->sk_state == BT_CONNECTED)
1202 err = l2cap_do_send(sk, msg, len);
1203 else
1204 err = -ENOTCONN;
1206 release_sock(sk);
1207 return err;
1210 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1212 struct sock *sk = sock->sk;
1214 lock_sock(sk);
1216 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1217 struct l2cap_conn_rsp rsp;
1219 sk->sk_state = BT_CONFIG;
1221 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1222 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1223 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1224 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1225 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1226 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1228 release_sock(sk);
1229 return 0;
1232 release_sock(sk);
1234 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1237 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1239 struct sock *sk = sock->sk;
1240 struct l2cap_options opts;
1241 int len, err = 0;
1242 u32 opt;
1244 BT_DBG("sk %p", sk);
1246 lock_sock(sk);
1248 switch (optname) {
1249 case L2CAP_OPTIONS:
1250 opts.imtu = l2cap_pi(sk)->imtu;
1251 opts.omtu = l2cap_pi(sk)->omtu;
1252 opts.flush_to = l2cap_pi(sk)->flush_to;
1253 opts.mode = L2CAP_MODE_BASIC;
1255 len = min_t(unsigned int, sizeof(opts), optlen);
1256 if (copy_from_user((char *) &opts, optval, len)) {
1257 err = -EFAULT;
1258 break;
1261 l2cap_pi(sk)->imtu = opts.imtu;
1262 l2cap_pi(sk)->omtu = opts.omtu;
1263 break;
1265 case L2CAP_LM:
1266 if (get_user(opt, (u32 __user *) optval)) {
1267 err = -EFAULT;
1268 break;
1271 if (opt & L2CAP_LM_AUTH)
1272 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1273 if (opt & L2CAP_LM_ENCRYPT)
1274 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1275 if (opt & L2CAP_LM_SECURE)
1276 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1278 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1279 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1280 break;
1282 default:
1283 err = -ENOPROTOOPT;
1284 break;
1287 release_sock(sk);
1288 return err;
1291 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1293 struct sock *sk = sock->sk;
1294 struct bt_security sec;
1295 int len, err = 0;
1296 u32 opt;
1298 BT_DBG("sk %p", sk);
1300 if (level == SOL_L2CAP)
1301 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1303 if (level != SOL_BLUETOOTH)
1304 return -ENOPROTOOPT;
1306 lock_sock(sk);
1308 switch (optname) {
1309 case BT_SECURITY:
1310 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1311 err = -EINVAL;
1312 break;
1315 sec.level = BT_SECURITY_LOW;
1317 len = min_t(unsigned int, sizeof(sec), optlen);
1318 if (copy_from_user((char *) &sec, optval, len)) {
1319 err = -EFAULT;
1320 break;
1323 if (sec.level < BT_SECURITY_LOW ||
1324 sec.level > BT_SECURITY_HIGH) {
1325 err = -EINVAL;
1326 break;
1329 l2cap_pi(sk)->sec_level = sec.level;
1330 break;
1332 case BT_DEFER_SETUP:
1333 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1334 err = -EINVAL;
1335 break;
1338 if (get_user(opt, (u32 __user *) optval)) {
1339 err = -EFAULT;
1340 break;
1343 bt_sk(sk)->defer_setup = opt;
1344 break;
1346 default:
1347 err = -ENOPROTOOPT;
1348 break;
1351 release_sock(sk);
1352 return err;
1355 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1357 struct sock *sk = sock->sk;
1358 struct l2cap_options opts;
1359 struct l2cap_conninfo cinfo;
1360 int len, err = 0;
1361 u32 opt;
1363 BT_DBG("sk %p", sk);
1365 if (get_user(len, optlen))
1366 return -EFAULT;
1368 lock_sock(sk);
1370 switch (optname) {
1371 case L2CAP_OPTIONS:
1372 opts.imtu = l2cap_pi(sk)->imtu;
1373 opts.omtu = l2cap_pi(sk)->omtu;
1374 opts.flush_to = l2cap_pi(sk)->flush_to;
1375 opts.mode = L2CAP_MODE_BASIC;
1377 len = min_t(unsigned int, len, sizeof(opts));
1378 if (copy_to_user(optval, (char *) &opts, len))
1379 err = -EFAULT;
1381 break;
1383 case L2CAP_LM:
1384 switch (l2cap_pi(sk)->sec_level) {
1385 case BT_SECURITY_LOW:
1386 opt = L2CAP_LM_AUTH;
1387 break;
1388 case BT_SECURITY_MEDIUM:
1389 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1390 break;
1391 case BT_SECURITY_HIGH:
1392 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1393 L2CAP_LM_SECURE;
1394 break;
1395 default:
1396 opt = 0;
1397 break;
1400 if (l2cap_pi(sk)->role_switch)
1401 opt |= L2CAP_LM_MASTER;
1403 if (l2cap_pi(sk)->force_reliable)
1404 opt |= L2CAP_LM_RELIABLE;
1406 if (put_user(opt, (u32 __user *) optval))
1407 err = -EFAULT;
1408 break;
1410 case L2CAP_CONNINFO:
1411 if (sk->sk_state != BT_CONNECTED &&
1412 !(sk->sk_state == BT_CONNECT2 &&
1413 bt_sk(sk)->defer_setup)) {
1414 err = -ENOTCONN;
1415 break;
1418 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1419 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1421 len = min_t(unsigned int, len, sizeof(cinfo));
1422 if (copy_to_user(optval, (char *) &cinfo, len))
1423 err = -EFAULT;
1425 break;
1427 default:
1428 err = -ENOPROTOOPT;
1429 break;
1432 release_sock(sk);
1433 return err;
1436 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1438 struct sock *sk = sock->sk;
1439 struct bt_security sec;
1440 int len, err = 0;
1442 BT_DBG("sk %p", sk);
1444 if (level == SOL_L2CAP)
1445 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1447 if (level != SOL_BLUETOOTH)
1448 return -ENOPROTOOPT;
1450 if (get_user(len, optlen))
1451 return -EFAULT;
1453 lock_sock(sk);
1455 switch (optname) {
1456 case BT_SECURITY:
1457 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1458 err = -EINVAL;
1459 break;
1462 sec.level = l2cap_pi(sk)->sec_level;
1464 len = min_t(unsigned int, len, sizeof(sec));
1465 if (copy_to_user(optval, (char *) &sec, len))
1466 err = -EFAULT;
1468 break;
1470 case BT_DEFER_SETUP:
1471 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1472 err = -EINVAL;
1473 break;
1476 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1477 err = -EFAULT;
1479 break;
1481 default:
1482 err = -ENOPROTOOPT;
1483 break;
1486 release_sock(sk);
1487 return err;
1490 static int l2cap_sock_shutdown(struct socket *sock, int how)
1492 struct sock *sk = sock->sk;
1493 int err = 0;
1495 BT_DBG("sock %p, sk %p", sock, sk);
1497 if (!sk)
1498 return 0;
1500 lock_sock(sk);
1501 if (!sk->sk_shutdown) {
1502 sk->sk_shutdown = SHUTDOWN_MASK;
1503 l2cap_sock_clear_timer(sk);
1504 __l2cap_sock_close(sk, 0);
1506 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1507 err = bt_sock_wait_state(sk, BT_CLOSED,
1508 sk->sk_lingertime);
1510 release_sock(sk);
1511 return err;
1514 static int l2cap_sock_release(struct socket *sock)
1516 struct sock *sk = sock->sk;
1517 int err;
1519 BT_DBG("sock %p, sk %p", sock, sk);
1521 if (!sk)
1522 return 0;
1524 err = l2cap_sock_shutdown(sock, 2);
1526 sock_orphan(sk);
1527 l2cap_sock_kill(sk);
1528 return err;
1531 static void l2cap_chan_ready(struct sock *sk)
1533 struct sock *parent = bt_sk(sk)->parent;
1535 BT_DBG("sk %p, parent %p", sk, parent);
1537 l2cap_pi(sk)->conf_state = 0;
1538 l2cap_sock_clear_timer(sk);
1540 if (!parent) {
1541 /* Outgoing channel.
1542 * Wake up socket sleeping on connect.
1544 sk->sk_state = BT_CONNECTED;
1545 sk->sk_state_change(sk);
1546 } else {
1547 /* Incoming channel.
1548 * Wake up socket sleeping on accept.
1550 parent->sk_data_ready(parent, 0);
1554 /* Copy frame to all raw sockets on that connection */
1555 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1557 struct l2cap_chan_list *l = &conn->chan_list;
1558 struct sk_buff *nskb;
1559 struct sock * sk;
1561 BT_DBG("conn %p", conn);
1563 read_lock(&l->lock);
1564 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1565 if (sk->sk_type != SOCK_RAW)
1566 continue;
1568 /* Don't send frame to the socket it came from */
1569 if (skb->sk == sk)
1570 continue;
1572 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1573 continue;
1575 if (sock_queue_rcv_skb(sk, nskb))
1576 kfree_skb(nskb);
1578 read_unlock(&l->lock);
1581 /* ---- L2CAP signalling commands ---- */
1582 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1583 u8 code, u8 ident, u16 dlen, void *data)
1585 struct sk_buff *skb, **frag;
1586 struct l2cap_cmd_hdr *cmd;
1587 struct l2cap_hdr *lh;
1588 int len, count;
1590 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1592 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1593 count = min_t(unsigned int, conn->mtu, len);
1595 skb = bt_skb_alloc(count, GFP_ATOMIC);
1596 if (!skb)
1597 return NULL;
1599 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1600 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1601 lh->cid = cpu_to_le16(0x0001);
1603 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1604 cmd->code = code;
1605 cmd->ident = ident;
1606 cmd->len = cpu_to_le16(dlen);
1608 if (dlen) {
1609 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1610 memcpy(skb_put(skb, count), data, count);
1611 data += count;
1614 len -= skb->len;
1616 /* Continuation fragments (no L2CAP header) */
1617 frag = &skb_shinfo(skb)->frag_list;
1618 while (len) {
1619 count = min_t(unsigned int, conn->mtu, len);
1621 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1622 if (!*frag)
1623 goto fail;
1625 memcpy(skb_put(*frag, count), data, count);
1627 len -= count;
1628 data += count;
1630 frag = &(*frag)->next;
1633 return skb;
1635 fail:
1636 kfree_skb(skb);
1637 return NULL;
1640 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1642 struct l2cap_conf_opt *opt = *ptr;
1643 int len;
1645 len = L2CAP_CONF_OPT_SIZE + opt->len;
1646 *ptr += len;
1648 *type = opt->type;
1649 *olen = opt->len;
1651 switch (opt->len) {
1652 case 1:
1653 *val = *((u8 *) opt->val);
1654 break;
1656 case 2:
1657 *val = __le16_to_cpu(*((__le16 *) opt->val));
1658 break;
1660 case 4:
1661 *val = __le32_to_cpu(*((__le32 *) opt->val));
1662 break;
1664 default:
1665 *val = (unsigned long) opt->val;
1666 break;
1669 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1670 return len;
1673 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1675 struct l2cap_conf_opt *opt = *ptr;
1677 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1679 opt->type = type;
1680 opt->len = len;
1682 switch (len) {
1683 case 1:
1684 *((u8 *) opt->val) = val;
1685 break;
1687 case 2:
1688 *((__le16 *) opt->val) = cpu_to_le16(val);
1689 break;
1691 case 4:
1692 *((__le32 *) opt->val) = cpu_to_le32(val);
1693 break;
1695 default:
1696 memcpy(opt->val, (void *) val, len);
1697 break;
1700 *ptr += L2CAP_CONF_OPT_SIZE + len;
1703 static int l2cap_build_conf_req(struct sock *sk, void *data)
1705 struct l2cap_pinfo *pi = l2cap_pi(sk);
1706 struct l2cap_conf_req *req = data;
1707 void *ptr = req->data;
1709 BT_DBG("sk %p", sk);
1711 if (pi->imtu != L2CAP_DEFAULT_MTU)
1712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1714 /* FIXME: Need actual value of the flush timeout */
1715 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1716 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1718 req->dcid = cpu_to_le16(pi->dcid);
1719 req->flags = cpu_to_le16(0);
1721 return ptr - data;
1724 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1726 struct l2cap_pinfo *pi = l2cap_pi(sk);
1727 struct l2cap_conf_rsp *rsp = data;
1728 void *ptr = rsp->data;
1729 void *req = pi->conf_req;
1730 int len = pi->conf_len;
1731 int type, hint, olen;
1732 unsigned long val;
1733 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1734 u16 mtu = L2CAP_DEFAULT_MTU;
1735 u16 result = L2CAP_CONF_SUCCESS;
1737 BT_DBG("sk %p", sk);
1739 while (len >= L2CAP_CONF_OPT_SIZE) {
1740 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1742 hint = type & 0x80;
1743 type &= 0x7f;
1745 switch (type) {
1746 case L2CAP_CONF_MTU:
1747 mtu = val;
1748 break;
1750 case L2CAP_CONF_FLUSH_TO:
1751 pi->flush_to = val;
1752 break;
1754 case L2CAP_CONF_QOS:
1755 break;
1757 case L2CAP_CONF_RFC:
1758 if (olen == sizeof(rfc))
1759 memcpy(&rfc, (void *) val, olen);
1760 break;
1762 default:
1763 if (hint)
1764 break;
1766 result = L2CAP_CONF_UNKNOWN;
1767 *((u8 *) ptr++) = type;
1768 break;
1772 if (result == L2CAP_CONF_SUCCESS) {
1773 /* Configure output options and let the other side know
1774 * which ones we don't like. */
1776 if (rfc.mode == L2CAP_MODE_BASIC) {
1777 if (mtu < pi->omtu)
1778 result = L2CAP_CONF_UNACCEPT;
1779 else {
1780 pi->omtu = mtu;
1781 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1784 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1785 } else {
1786 result = L2CAP_CONF_UNACCEPT;
1788 memset(&rfc, 0, sizeof(rfc));
1789 rfc.mode = L2CAP_MODE_BASIC;
1791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1792 sizeof(rfc), (unsigned long) &rfc);
1796 rsp->scid = cpu_to_le16(pi->dcid);
1797 rsp->result = cpu_to_le16(result);
1798 rsp->flags = cpu_to_le16(0x0000);
1800 return ptr - data;
1803 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1805 struct l2cap_conf_rsp *rsp = data;
1806 void *ptr = rsp->data;
1808 BT_DBG("sk %p", sk);
1810 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1811 rsp->result = cpu_to_le16(result);
1812 rsp->flags = cpu_to_le16(flags);
1814 return ptr - data;
1817 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1819 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1821 if (rej->reason != 0x0000)
1822 return 0;
1824 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1825 cmd->ident == conn->info_ident) {
1826 del_timer(&conn->info_timer);
1828 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1829 conn->info_ident = 0;
1831 l2cap_conn_start(conn);
1834 return 0;
1837 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1839 struct l2cap_chan_list *list = &conn->chan_list;
1840 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1841 struct l2cap_conn_rsp rsp;
1842 struct sock *sk, *parent;
1843 int result, status = L2CAP_CS_NO_INFO;
1845 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1846 __le16 psm = req->psm;
1848 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1850 /* Check if we have socket listening on psm */
1851 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1852 if (!parent) {
1853 result = L2CAP_CR_BAD_PSM;
1854 goto sendresp;
1857 /* Check if the ACL is secure enough (if not SDP) */
1858 if (psm != cpu_to_le16(0x0001) &&
1859 !hci_conn_check_link_mode(conn->hcon)) {
1860 conn->disc_reason = 0x05;
1861 result = L2CAP_CR_SEC_BLOCK;
1862 goto response;
1865 result = L2CAP_CR_NO_MEM;
1867 /* Check for backlog size */
1868 if (sk_acceptq_is_full(parent)) {
1869 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1870 goto response;
1873 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1874 if (!sk)
1875 goto response;
1877 write_lock_bh(&list->lock);
1879 /* Check if we already have channel with that dcid */
1880 if (__l2cap_get_chan_by_dcid(list, scid)) {
1881 write_unlock_bh(&list->lock);
1882 sock_set_flag(sk, SOCK_ZAPPED);
1883 l2cap_sock_kill(sk);
1884 goto response;
1887 hci_conn_hold(conn->hcon);
1889 l2cap_sock_init(sk, parent);
1890 bacpy(&bt_sk(sk)->src, conn->src);
1891 bacpy(&bt_sk(sk)->dst, conn->dst);
1892 l2cap_pi(sk)->psm = psm;
1893 l2cap_pi(sk)->dcid = scid;
1895 __l2cap_chan_add(conn, sk, parent);
1896 dcid = l2cap_pi(sk)->scid;
1898 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1900 l2cap_pi(sk)->ident = cmd->ident;
1902 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1903 if (l2cap_check_security(sk)) {
1904 if (bt_sk(sk)->defer_setup) {
1905 sk->sk_state = BT_CONNECT2;
1906 result = L2CAP_CR_PEND;
1907 status = L2CAP_CS_AUTHOR_PEND;
1908 parent->sk_data_ready(parent, 0);
1909 } else {
1910 sk->sk_state = BT_CONFIG;
1911 result = L2CAP_CR_SUCCESS;
1912 status = L2CAP_CS_NO_INFO;
1914 } else {
1915 sk->sk_state = BT_CONNECT2;
1916 result = L2CAP_CR_PEND;
1917 status = L2CAP_CS_AUTHEN_PEND;
1919 } else {
1920 sk->sk_state = BT_CONNECT2;
1921 result = L2CAP_CR_PEND;
1922 status = L2CAP_CS_NO_INFO;
1925 write_unlock_bh(&list->lock);
1927 response:
1928 bh_unlock_sock(parent);
1930 sendresp:
1931 rsp.scid = cpu_to_le16(scid);
1932 rsp.dcid = cpu_to_le16(dcid);
1933 rsp.result = cpu_to_le16(result);
1934 rsp.status = cpu_to_le16(status);
1935 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1937 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1938 struct l2cap_info_req info;
1939 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1941 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1942 conn->info_ident = l2cap_get_ident(conn);
1944 mod_timer(&conn->info_timer, jiffies +
1945 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1947 l2cap_send_cmd(conn, conn->info_ident,
1948 L2CAP_INFO_REQ, sizeof(info), &info);
1951 return 0;
1954 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1956 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1957 u16 scid, dcid, result, status;
1958 struct sock *sk;
1959 u8 req[128];
1961 scid = __le16_to_cpu(rsp->scid);
1962 dcid = __le16_to_cpu(rsp->dcid);
1963 result = __le16_to_cpu(rsp->result);
1964 status = __le16_to_cpu(rsp->status);
1966 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1968 if (scid) {
1969 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1970 return 0;
1971 } else {
1972 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1973 return 0;
1976 switch (result) {
1977 case L2CAP_CR_SUCCESS:
1978 sk->sk_state = BT_CONFIG;
1979 l2cap_pi(sk)->ident = 0;
1980 l2cap_pi(sk)->dcid = dcid;
1981 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1983 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
1985 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1986 l2cap_build_conf_req(sk, req), req);
1987 break;
1989 case L2CAP_CR_PEND:
1990 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
1991 break;
1993 default:
1994 l2cap_chan_del(sk, ECONNREFUSED);
1995 break;
1998 bh_unlock_sock(sk);
1999 return 0;
2002 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2004 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2005 u16 dcid, flags;
2006 u8 rsp[64];
2007 struct sock *sk;
2008 int len;
2010 dcid = __le16_to_cpu(req->dcid);
2011 flags = __le16_to_cpu(req->flags);
2013 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2015 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2016 return -ENOENT;
2018 if (sk->sk_state == BT_DISCONN)
2019 goto unlock;
2021 /* Reject if config buffer is too small. */
2022 len = cmd_len - sizeof(*req);
2023 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2024 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2025 l2cap_build_conf_rsp(sk, rsp,
2026 L2CAP_CONF_REJECT, flags), rsp);
2027 goto unlock;
2030 /* Store config. */
2031 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2032 l2cap_pi(sk)->conf_len += len;
2034 if (flags & 0x0001) {
2035 /* Incomplete config. Send empty response. */
2036 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2037 l2cap_build_conf_rsp(sk, rsp,
2038 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2039 goto unlock;
2042 /* Complete config. */
2043 len = l2cap_parse_conf_req(sk, rsp);
2044 if (len < 0)
2045 goto unlock;
2047 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2049 /* Reset config buffer. */
2050 l2cap_pi(sk)->conf_len = 0;
2052 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2053 goto unlock;
2055 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2056 sk->sk_state = BT_CONNECTED;
2057 l2cap_chan_ready(sk);
2058 goto unlock;
2061 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2062 u8 buf[64];
2063 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2064 l2cap_build_conf_req(sk, buf), buf);
2067 unlock:
2068 bh_unlock_sock(sk);
2069 return 0;
2072 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2074 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2075 u16 scid, flags, result;
2076 struct sock *sk;
2078 scid = __le16_to_cpu(rsp->scid);
2079 flags = __le16_to_cpu(rsp->flags);
2080 result = __le16_to_cpu(rsp->result);
2082 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
2084 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2085 return 0;
2087 switch (result) {
2088 case L2CAP_CONF_SUCCESS:
2089 break;
2091 case L2CAP_CONF_UNACCEPT:
2092 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2093 char req[128];
2094 /* It does not make sense to adjust L2CAP parameters
2095 * that are currently defined in the spec. We simply
2096 * resend config request that we sent earlier. It is
2097 * stupid, but it helps qualification testing which
2098 * expects at least some response from us. */
2099 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2100 l2cap_build_conf_req(sk, req), req);
2101 goto done;
2104 default:
2105 sk->sk_state = BT_DISCONN;
2106 sk->sk_err = ECONNRESET;
2107 l2cap_sock_set_timer(sk, HZ * 5);
2109 struct l2cap_disconn_req req;
2110 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2111 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2112 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2113 L2CAP_DISCONN_REQ, sizeof(req), &req);
2115 goto done;
2118 if (flags & 0x01)
2119 goto done;
2121 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2123 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2124 sk->sk_state = BT_CONNECTED;
2125 l2cap_chan_ready(sk);
2128 done:
2129 bh_unlock_sock(sk);
2130 return 0;
2133 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2135 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2136 struct l2cap_disconn_rsp rsp;
2137 u16 dcid, scid;
2138 struct sock *sk;
2140 scid = __le16_to_cpu(req->scid);
2141 dcid = __le16_to_cpu(req->dcid);
2143 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2145 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
2146 return 0;
2148 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2149 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2150 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2152 sk->sk_shutdown = SHUTDOWN_MASK;
2154 l2cap_chan_del(sk, ECONNRESET);
2155 bh_unlock_sock(sk);
2157 l2cap_sock_kill(sk);
2158 return 0;
2161 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2163 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2164 u16 dcid, scid;
2165 struct sock *sk;
2167 scid = __le16_to_cpu(rsp->scid);
2168 dcid = __le16_to_cpu(rsp->dcid);
2170 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2172 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
2173 return 0;
2175 l2cap_chan_del(sk, 0);
2176 bh_unlock_sock(sk);
2178 l2cap_sock_kill(sk);
2179 return 0;
2182 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2184 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2185 u16 type;
2187 type = __le16_to_cpu(req->type);
2189 BT_DBG("type 0x%4.4x", type);
2191 if (type == L2CAP_IT_FEAT_MASK) {
2192 u8 buf[8];
2193 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2194 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2195 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2196 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2197 l2cap_send_cmd(conn, cmd->ident,
2198 L2CAP_INFO_RSP, sizeof(buf), buf);
2199 } else if (type == L2CAP_IT_FIXED_CHAN) {
2200 u8 buf[12];
2201 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2202 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2203 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2204 memcpy(buf + 4, l2cap_fixed_chan, 8);
2205 l2cap_send_cmd(conn, cmd->ident,
2206 L2CAP_INFO_RSP, sizeof(buf), buf);
2207 } else {
2208 struct l2cap_info_rsp rsp;
2209 rsp.type = cpu_to_le16(type);
2210 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2211 l2cap_send_cmd(conn, cmd->ident,
2212 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2215 return 0;
2218 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2220 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2221 u16 type, result;
2223 type = __le16_to_cpu(rsp->type);
2224 result = __le16_to_cpu(rsp->result);
2226 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2228 del_timer(&conn->info_timer);
2230 if (type == L2CAP_IT_FEAT_MASK) {
2231 conn->feat_mask = get_unaligned_le32(rsp->data);
2233 if (conn->feat_mask & 0x0080) {
2234 struct l2cap_info_req req;
2235 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2237 conn->info_ident = l2cap_get_ident(conn);
2239 l2cap_send_cmd(conn, conn->info_ident,
2240 L2CAP_INFO_REQ, sizeof(req), &req);
2241 } else {
2242 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2243 conn->info_ident = 0;
2245 l2cap_conn_start(conn);
2247 } else if (type == L2CAP_IT_FIXED_CHAN) {
2248 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2249 conn->info_ident = 0;
2251 l2cap_conn_start(conn);
2254 return 0;
2257 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2259 u8 *data = skb->data;
2260 int len = skb->len;
2261 struct l2cap_cmd_hdr cmd;
2262 int err = 0;
2264 l2cap_raw_recv(conn, skb);
2266 while (len >= L2CAP_CMD_HDR_SIZE) {
2267 u16 cmd_len;
2268 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2269 data += L2CAP_CMD_HDR_SIZE;
2270 len -= L2CAP_CMD_HDR_SIZE;
2272 cmd_len = le16_to_cpu(cmd.len);
2274 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2276 if (cmd_len > len || !cmd.ident) {
2277 BT_DBG("corrupted command");
2278 break;
2281 switch (cmd.code) {
2282 case L2CAP_COMMAND_REJ:
2283 l2cap_command_rej(conn, &cmd, data);
2284 break;
2286 case L2CAP_CONN_REQ:
2287 err = l2cap_connect_req(conn, &cmd, data);
2288 break;
2290 case L2CAP_CONN_RSP:
2291 err = l2cap_connect_rsp(conn, &cmd, data);
2292 break;
2294 case L2CAP_CONF_REQ:
2295 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2296 break;
2298 case L2CAP_CONF_RSP:
2299 err = l2cap_config_rsp(conn, &cmd, data);
2300 break;
2302 case L2CAP_DISCONN_REQ:
2303 err = l2cap_disconnect_req(conn, &cmd, data);
2304 break;
2306 case L2CAP_DISCONN_RSP:
2307 err = l2cap_disconnect_rsp(conn, &cmd, data);
2308 break;
2310 case L2CAP_ECHO_REQ:
2311 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2312 break;
2314 case L2CAP_ECHO_RSP:
2315 break;
2317 case L2CAP_INFO_REQ:
2318 err = l2cap_information_req(conn, &cmd, data);
2319 break;
2321 case L2CAP_INFO_RSP:
2322 err = l2cap_information_rsp(conn, &cmd, data);
2323 break;
2325 default:
2326 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2327 err = -EINVAL;
2328 break;
2331 if (err) {
2332 struct l2cap_cmd_rej rej;
2333 BT_DBG("error %d", err);
2335 /* FIXME: Map err to a valid reason */
2336 rej.reason = cpu_to_le16(0);
2337 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2340 data += cmd_len;
2341 len -= cmd_len;
2344 kfree_skb(skb);
2347 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2349 struct sock *sk;
2351 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2352 if (!sk) {
2353 BT_DBG("unknown cid 0x%4.4x", cid);
2354 goto drop;
2357 BT_DBG("sk %p, len %d", sk, skb->len);
2359 if (sk->sk_state != BT_CONNECTED)
2360 goto drop;
2362 if (l2cap_pi(sk)->imtu < skb->len)
2363 goto drop;
2365 /* If socket recv buffers overflows we drop data here
2366 * which is *bad* because L2CAP has to be reliable.
2367 * But we don't have any other choice. L2CAP doesn't
2368 * provide flow control mechanism. */
2370 if (!sock_queue_rcv_skb(sk, skb))
2371 goto done;
2373 drop:
2374 kfree_skb(skb);
2376 done:
2377 if (sk)
2378 bh_unlock_sock(sk);
2380 return 0;
2383 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2385 struct sock *sk;
2387 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2388 if (!sk)
2389 goto drop;
2391 BT_DBG("sk %p, len %d", sk, skb->len);
2393 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2394 goto drop;
2396 if (l2cap_pi(sk)->imtu < skb->len)
2397 goto drop;
2399 if (!sock_queue_rcv_skb(sk, skb))
2400 goto done;
2402 drop:
2403 kfree_skb(skb);
2405 done:
2406 if (sk) bh_unlock_sock(sk);
2407 return 0;
2408 }
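/*
 * Top-level demultiplexer.  Every frame starts with the basic L2CAP
 * header (16-bit length and 16-bit channel id, both little endian).
 * CID 0x0001 is the signalling channel, CID 0x0002 carries
 * connectionless data prefixed with a 16-bit PSM, and anything else is
 * treated as a connection-oriented channel.
 */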
2410 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2411 {
2412 struct l2cap_hdr *lh = (void *) skb->data;
2413 u16 cid, len;
2414 __le16 psm;
2416 skb_pull(skb, L2CAP_HDR_SIZE);
2417 cid = __le16_to_cpu(lh->cid);
2418 len = __le16_to_cpu(lh->len);
2420 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2422 switch (cid) {
2423 case 0x0001:
2424 l2cap_sig_channel(conn, skb);
2425 break;
2427 case 0x0002:
2428 psm = get_unaligned((__le16 *) skb->data);
2429 skb_pull(skb, 2);
2430 l2cap_conless_channel(conn, psm, skb);
2431 break;
2433 default:
2434 l2cap_data_channel(conn, cid, skb);
2435 break;
2436 }
2437 }
2439 /* ---- L2CAP interface with lower layer (HCI) ---- */
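/*
 * Incoming ACL connection request: scan the listening sockets and build
 * the link-mode mask (accept and, if role_switch is set, master) that
 * is reported back to the HCI core.  Sockets bound to the local adapter
 * address take precedence over sockets bound to BDADDR_ANY.
 */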
2441 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2442 {
2443 int exact = 0, lm1 = 0, lm2 = 0;
2444 register struct sock *sk;
2445 struct hlist_node *node;
2447 if (type != ACL_LINK)
2448 return 0;
2450 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2452 /* Find listening sockets and check their link_mode */
2453 read_lock(&l2cap_sk_list.lock);
2454 sk_for_each(sk, node, &l2cap_sk_list.head) {
2455 if (sk->sk_state != BT_LISTEN)
2456 continue;
2458 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2459 lm1 |= HCI_LM_ACCEPT;
2460 if (l2cap_pi(sk)->role_switch)
2461 lm1 |= HCI_LM_MASTER;
2462 exact++;
2463 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2464 lm2 |= HCI_LM_ACCEPT;
2465 if (l2cap_pi(sk)->role_switch)
2466 lm2 |= HCI_LM_MASTER;
2467 }
2468 }
2469 read_unlock(&l2cap_sk_list.lock);
2471 return exact ? lm1 : lm2;
2472 }
2474 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2475 {
2476 struct l2cap_conn *conn;
2478 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2480 if (hcon->type != ACL_LINK)
2481 return 0;
2483 if (!status) {
2484 conn = l2cap_conn_add(hcon, status);
2485 if (conn)
2486 l2cap_conn_ready(conn);
2487 } else
2488 l2cap_conn_del(hcon, bt_err(status));
2490 return 0;
2491 }
2493 static int l2cap_disconn_ind(struct hci_conn *hcon)
2494 {
2495 struct l2cap_conn *conn = hcon->l2cap_data;
2497 BT_DBG("hcon %p", hcon);
2499 if (hcon->type != ACL_LINK || !conn)
2500 return 0x13;
2502 return conn->disc_reason;
2503 }
2505 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
2506 {
2507 BT_DBG("hcon %p reason %d", hcon, reason);
2509 if (hcon->type != ACL_LINK)
2510 return 0;
2512 l2cap_conn_del(hcon, bt_err(reason));
2514 return 0;
2515 }
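/*
 * React to a change of the link encryption.  Losing encryption on a
 * BT_SECURITY_MEDIUM channel arms a five second grace timer, while a
 * BT_SECURITY_HIGH channel is closed immediately; once encryption is
 * back on, the grace timer is cleared again.
 */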
2517 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2518 {
2519 if (sk->sk_type != SOCK_SEQPACKET)
2520 return;
2522 if (encrypt == 0x00) {
2523 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
2524 l2cap_sock_clear_timer(sk);
2525 l2cap_sock_set_timer(sk, HZ * 5);
2526 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2527 __l2cap_sock_close(sk, ECONNREFUSED);
2528 } else {
2529 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2530 l2cap_sock_clear_timer(sk);
2531 }
2532 }
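/*
 * Completion of an authentication/encryption request.  Walk every
 * channel on the connection: established channels only get their
 * encryption state re-checked, channels in BT_CONNECT send the deferred
 * connect request, and channels in BT_CONNECT2 answer the remote side
 * with either L2CAP_CR_SUCCESS or L2CAP_CR_SEC_BLOCK.
 */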
2534 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2535 {
2536 struct l2cap_chan_list *l;
2537 struct l2cap_conn *conn = hcon->l2cap_data;
2538 struct sock *sk;
2540 if (!conn)
2541 return 0;
2543 l = &conn->chan_list;
2545 BT_DBG("conn %p", conn);
2547 read_lock(&l->lock);
2549 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2550 bh_lock_sock(sk);
2552 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2553 bh_unlock_sock(sk);
2554 continue;
2555 }
2557 if (!status && (sk->sk_state == BT_CONNECTED ||
2558 sk->sk_state == BT_CONFIG)) {
2559 l2cap_check_encryption(sk, encrypt);
2560 bh_unlock_sock(sk);
2561 continue;
2562 }
2564 if (sk->sk_state == BT_CONNECT) {
2565 if (!status) {
2566 struct l2cap_conn_req req;
2567 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2568 req.psm = l2cap_pi(sk)->psm;
2570 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2572 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2573 L2CAP_CONN_REQ, sizeof(req), &req);
2574 } else {
2575 l2cap_sock_clear_timer(sk);
2576 l2cap_sock_set_timer(sk, HZ / 10);
2577 }
2578 } else if (sk->sk_state == BT_CONNECT2) {
2579 struct l2cap_conn_rsp rsp;
2580 __u16 result;
2582 if (!status) {
2583 sk->sk_state = BT_CONFIG;
2584 result = L2CAP_CR_SUCCESS;
2585 } else {
2586 sk->sk_state = BT_DISCONN;
2587 l2cap_sock_set_timer(sk, HZ / 10);
2588 result = L2CAP_CR_SEC_BLOCK;
2589 }
2591 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2592 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2593 rsp.result = cpu_to_le16(result);
2594 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2595 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2596 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2597 }
2599 bh_unlock_sock(sk);
2600 }
2602 read_unlock(&l->lock);
2604 return 0;
2605 }
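/*
 * Reassemble L2CAP frames from ACL fragments.  A fragment flagged
 * ACL_START must be large enough to carry the frame length field, from
 * which the total length is taken; continuation fragments are copied
 * into conn->rx_skb until conn->rx_len reaches zero and the complete
 * frame is handed to l2cap_recv_frame().  Inconsistent fragments mark
 * the connection unreliable and are dropped.
 */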
2607 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2608 {
2609 struct l2cap_conn *conn = hcon->l2cap_data;
2611 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2612 goto drop;
2614 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2616 if (flags & ACL_START) {
2617 struct l2cap_hdr *hdr;
2618 int len;
2620 if (conn->rx_len) {
2621 BT_ERR("Unexpected start frame (len %d)", skb->len);
2622 kfree_skb(conn->rx_skb);
2623 conn->rx_skb = NULL;
2624 conn->rx_len = 0;
2625 l2cap_conn_unreliable(conn, ECOMM);
2626 }
2628 if (skb->len < 2) {
2629 BT_ERR("Frame is too short (len %d)", skb->len);
2630 l2cap_conn_unreliable(conn, ECOMM);
2631 goto drop;
2632 }
2634 hdr = (struct l2cap_hdr *) skb->data;
2635 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2637 if (len == skb->len) {
2638 /* Complete frame received */
2639 l2cap_recv_frame(conn, skb);
2640 return 0;
2641 }
2643 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2645 if (skb->len > len) {
2646 BT_ERR("Frame is too long (len %d, expected len %d)",
2647 skb->len, len);
2648 l2cap_conn_unreliable(conn, ECOMM);
2649 goto drop;
2650 }
2652 /* Allocate skb for the complete frame (with header) */
2653 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2654 goto drop;
2656 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2657 skb->len);
2658 conn->rx_len = len - skb->len;
2659 } else {
2660 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2662 if (!conn->rx_len) {
2663 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2664 l2cap_conn_unreliable(conn, ECOMM);
2665 goto drop;
2666 }
2668 if (skb->len > conn->rx_len) {
2669 BT_ERR("Fragment is too long (len %d, expected %d)",
2670 skb->len, conn->rx_len);
2671 kfree_skb(conn->rx_skb);
2672 conn->rx_skb = NULL;
2673 conn->rx_len = 0;
2674 l2cap_conn_unreliable(conn, ECOMM);
2675 goto drop;
2676 }
2678 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2679 skb->len);
2680 conn->rx_len -= skb->len;
2682 if (!conn->rx_len) {
2683 /* Complete frame received */
2684 l2cap_recv_frame(conn, conn->rx_skb);
2685 conn->rx_skb = NULL;
2686 }
2687 }
2689 drop:
2690 kfree_skb(skb);
2691 return 0;
2692 }
2694 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2695 {
2696 struct sock *sk;
2697 struct hlist_node *node;
2698 char *str = buf;
2700 read_lock_bh(&l2cap_sk_list.lock);
2702 sk_for_each(sk, node, &l2cap_sk_list.head) {
2703 struct l2cap_pinfo *pi = l2cap_pi(sk);
2705 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2706 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2707 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2708 pi->imtu, pi->omtu, pi->sec_level);
2709 }
2711 read_unlock_bh(&l2cap_sk_list.lock);
2713 return (str - buf);
2714 }
2716 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
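/*
 * Reading the class attribute defined above returns one line per L2CAP
 * socket in the order printed by l2cap_sysfs_show():
 * src dst state psm scid dcid imtu omtu sec_level.  An illustrative
 * (made up) line for a connected channel on PSM 0x1001 might look like:
 *
 *	00:11:22:33:44:55 66:77:88:99:AA:BB 1 4097 0x0040 0x0041 672 672 1
 */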
2718 static const struct proto_ops l2cap_sock_ops = {
2719 .family = PF_BLUETOOTH,
2720 .owner = THIS_MODULE,
2721 .release = l2cap_sock_release,
2722 .bind = l2cap_sock_bind,
2723 .connect = l2cap_sock_connect,
2724 .listen = l2cap_sock_listen,
2725 .accept = l2cap_sock_accept,
2726 .getname = l2cap_sock_getname,
2727 .sendmsg = l2cap_sock_sendmsg,
2728 .recvmsg = l2cap_sock_recvmsg,
2729 .poll = bt_sock_poll,
2730 .ioctl = bt_sock_ioctl,
2731 .mmap = sock_no_mmap,
2732 .socketpair = sock_no_socketpair,
2733 .shutdown = l2cap_sock_shutdown,
2734 .setsockopt = l2cap_sock_setsockopt,
2735 .getsockopt = l2cap_sock_getsockopt
2736 };
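/*
 * These operations back ordinary Bluetooth sockets.  As an illustrative
 * sketch only (assuming the usual userspace BlueZ headers, not part of
 * this file), a client would reach them roughly like this:
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(sk, (struct sockaddr *) &addr, sizeof(addr));
 *
 * connect() then lands in l2cap_sock_connect(), and send()/recv() end
 * up in l2cap_sock_sendmsg()/l2cap_sock_recvmsg().
 */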
2738 static struct net_proto_family l2cap_sock_family_ops = {
2739 .family = PF_BLUETOOTH,
2740 .owner = THIS_MODULE,
2741 .create = l2cap_sock_create,
2742 };
2744 static struct hci_proto l2cap_hci_proto = {
2745 .name = "L2CAP",
2746 .id = HCI_PROTO_L2CAP,
2747 .connect_ind = l2cap_connect_ind,
2748 .connect_cfm = l2cap_connect_cfm,
2749 .disconn_ind = l2cap_disconn_ind,
2750 .disconn_cfm = l2cap_disconn_cfm,
2751 .security_cfm = l2cap_security_cfm,
2752 .recv_acldata = l2cap_recv_acldata
2753 };
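/*
 * Module init: register the protocol with the core socket layer
 * (proto_register + bt_sock_register), hook into the HCI core with
 * hci_register_proto(), and create the sysfs info file.  Registration
 * failures are unwound in reverse order; the sysfs file is best effort
 * and only logged on failure.
 */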
2755 static int __init l2cap_init(void)
2756 {
2757 int err;
2759 err = proto_register(&l2cap_proto, 0);
2760 if (err < 0)
2761 return err;
2763 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2764 if (err < 0) {
2765 BT_ERR("L2CAP socket registration failed");
2766 goto error;
2767 }
2769 err = hci_register_proto(&l2cap_hci_proto);
2770 if (err < 0) {
2771 BT_ERR("L2CAP protocol registration failed");
2772 bt_sock_unregister(BTPROTO_L2CAP);
2773 goto error;
2774 }
2776 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2777 BT_ERR("Failed to create L2CAP info file");
2779 BT_INFO("L2CAP ver %s", VERSION);
2780 BT_INFO("L2CAP socket layer initialized");
2782 return 0;
2784 error:
2785 proto_unregister(&l2cap_proto);
2786 return err;
2787 }
2789 static void __exit l2cap_exit(void)
2790 {
2791 class_remove_file(bt_class, &class_attr_l2cap);
2793 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2794 BT_ERR("L2CAP socket unregistration failed");
2796 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2797 BT_ERR("L2CAP protocol unregistration failed");
2799 proto_unregister(&l2cap_proto);
2800 }
2802 void l2cap_load(void)
2803 {
2804 /* Dummy function to trigger automatic L2CAP module loading by
2805 * other modules that use L2CAP sockets but don't use any other
2806 * symbols from it. */
2807 return;
2808 }
2809 EXPORT_SYMBOL(l2cap_load);
2811 module_init(l2cap_init);
2812 module_exit(l2cap_exit);
2814 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2815 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2816 MODULE_VERSION(VERSION);
2817 MODULE_LICENSE("GPL");
2818 MODULE_ALIAS("bt-proto-0");