1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <net/sock.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.13"
55 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
56 static u8 l2cap_fixed_chan[8] = { 0x02, };
58 static const struct proto_ops l2cap_sock_ops;
60 static struct bt_sock_list l2cap_sk_list = {
61 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
64 static void __l2cap_sock_close(struct sock *sk, int reason);
65 static void l2cap_sock_close(struct sock *sk);
66 static void l2cap_sock_kill(struct sock *sk);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
71 /* ---- L2CAP timers ---- */
72 static void l2cap_sock_timeout(unsigned long arg)
74 struct sock *sk = (struct sock *) arg;
75 int reason;
77 BT_DBG("sock %p state %d", sk, sk->sk_state);
79 bh_lock_sock(sk);
81 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
82 reason = ECONNREFUSED;
83 else if (sk->sk_state == BT_CONNECT &&
84 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
85 reason = ECONNREFUSED;
86 else
87 reason = ETIMEDOUT;
89 __l2cap_sock_close(sk, reason);
91 bh_unlock_sock(sk);
93 l2cap_sock_kill(sk);
94 sock_put(sk);
97 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
99 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
100 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
103 static void l2cap_sock_clear_timer(struct sock *sk)
105 BT_DBG("sock %p state %d", sk, sk->sk_state);
106 sk_stop_timer(sk, &sk->sk_timer);
109 /* ---- L2CAP channels ---- */
110 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
112 struct sock *s;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->dcid == cid)
115 break;
117 return s;
120 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
122 struct sock *s;
123 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
124 if (l2cap_pi(s)->scid == cid)
125 break;
127 return s;
130 /* Find channel with given SCID.
131 * Returns locked socket */
132 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 struct sock *s;
135 read_lock(&l->lock);
136 s = __l2cap_get_chan_by_scid(l, cid);
137 if (s)
138 bh_lock_sock(s);
139 read_unlock(&l->lock);
140 return s;
143 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
145 struct sock *s;
146 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
147 if (l2cap_pi(s)->ident == ident)
148 break;
150 return s;
153 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 struct sock *s;
156 read_lock(&l->lock);
157 s = __l2cap_get_chan_by_ident(l, ident);
158 if (s)
159 bh_lock_sock(s);
160 read_unlock(&l->lock);
161 return s;
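/* Allocate the first unused dynamic channel ID on this connection;
 * returns 0 if no dynamic CID is free. */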
164 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
166 u16 cid = L2CAP_CID_DYN_START;
168 for (; cid < L2CAP_CID_DYN_END; cid++) {
169 if (!__l2cap_get_chan_by_scid(l, cid))
170 return cid;
173 return 0;
176 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
178 sock_hold(sk);
180 if (l->head)
181 l2cap_pi(l->head)->prev_c = sk;
183 l2cap_pi(sk)->next_c = l->head;
184 l2cap_pi(sk)->prev_c = NULL;
185 l->head = sk;
188 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
190 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
192 write_lock_bh(&l->lock);
193 if (sk == l->head)
194 l->head = next;
196 if (next)
197 l2cap_pi(next)->prev_c = prev;
198 if (prev)
199 l2cap_pi(prev)->next_c = next;
200 write_unlock_bh(&l->lock);
202 __sock_put(sk);
205 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
207 struct l2cap_chan_list *l = &conn->chan_list;
209 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
210 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
212 conn->disc_reason = 0x13;
214 l2cap_pi(sk)->conn = conn;
216 if (sk->sk_type == SOCK_SEQPACKET) {
217 /* Alloc CID for connection-oriented socket */
218 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
219 } else if (sk->sk_type == SOCK_DGRAM) {
220 /* Connectionless socket */
221 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
222 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
223 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 } else {
225 /* Raw socket can send/recv signalling messages only */
226 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
227 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
228 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
231 __l2cap_chan_link(l, sk);
233 if (parent)
234 bt_accept_enqueue(parent, sk);
237 /* Delete channel.
238 * Must be called on the locked socket. */
239 static void l2cap_chan_del(struct sock *sk, int err)
241 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
242 struct sock *parent = bt_sk(sk)->parent;
244 l2cap_sock_clear_timer(sk);
246 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
248 if (conn) {
249 /* Unlink from channel list */
250 l2cap_chan_unlink(&conn->chan_list, sk);
251 l2cap_pi(sk)->conn = NULL;
252 hci_conn_put(conn->hcon);
255 sk->sk_state = BT_CLOSED;
256 sock_set_flag(sk, SOCK_ZAPPED);
258 if (err)
259 sk->sk_err = err;
261 if (parent) {
262 bt_accept_unlink(sk);
263 parent->sk_data_ready(parent, 0);
264 } else
265 sk->sk_state_change(sk);
268 /* Service level security */
269 static inline int l2cap_check_security(struct sock *sk)
271 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
272 __u8 auth_type;
274 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
275 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
276 auth_type = HCI_AT_NO_BONDING_MITM;
277 else
278 auth_type = HCI_AT_NO_BONDING;
280 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
281 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
282 } else {
283 switch (l2cap_pi(sk)->sec_level) {
284 case BT_SECURITY_HIGH:
285 auth_type = HCI_AT_GENERAL_BONDING_MITM;
286 break;
287 case BT_SECURITY_MEDIUM:
288 auth_type = HCI_AT_GENERAL_BONDING;
289 break;
290 default:
291 auth_type = HCI_AT_NO_BONDING;
292 break;
296 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
297 auth_type);
300 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
302 u8 id;
304 /* Get next available identifier.
305 * 1 - 128 are used by kernel.
306 * 129 - 199 are reserved.
307 * 200 - 254 are used by utilities like l2ping, etc. */
310 spin_lock_bh(&conn->lock);
312 if (++conn->tx_ident > 128)
313 conn->tx_ident = 1;
315 id = conn->tx_ident;
317 spin_unlock_bh(&conn->lock);
319 return id;
322 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
324 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
326 BT_DBG("code 0x%2.2x", code);
328 if (!skb)
329 return -ENOMEM;
331 return hci_send_acl(conn->hcon, skb, 0);
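/* Start channel establishment: if the remote feature mask is not known
 * yet, send an Information Request first; otherwise send the Connection
 * Request once the security check passes. */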
334 static void l2cap_do_start(struct sock *sk)
336 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
338 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
339 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
340 return;
342 if (l2cap_check_security(sk)) {
343 struct l2cap_conn_req req;
344 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
345 req.psm = l2cap_pi(sk)->psm;
347 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
349 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
350 L2CAP_CONN_REQ, sizeof(req), &req);
352 } else {
353 struct l2cap_info_req req;
354 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
356 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
357 conn->info_ident = l2cap_get_ident(conn);
359 mod_timer(&conn->info_timer, jiffies +
360 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
362 l2cap_send_cmd(conn, conn->info_ident,
363 L2CAP_INFO_REQ, sizeof(req), &req);
367 /* ---- L2CAP connections ---- */
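/* Resume setup of channels that were waiting for the information
 * exchange: send the pending Connection Requests and Responses. */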
368 static void l2cap_conn_start(struct l2cap_conn *conn)
370 struct l2cap_chan_list *l = &conn->chan_list;
371 struct sock *sk;
373 BT_DBG("conn %p", conn);
375 read_lock(&l->lock);
377 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
378 bh_lock_sock(sk);
380 if (sk->sk_type != SOCK_SEQPACKET) {
381 bh_unlock_sock(sk);
382 continue;
385 if (sk->sk_state == BT_CONNECT) {
386 if (l2cap_check_security(sk)) {
387 struct l2cap_conn_req req;
388 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
389 req.psm = l2cap_pi(sk)->psm;
391 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
393 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
394 L2CAP_CONN_REQ, sizeof(req), &req);
396 } else if (sk->sk_state == BT_CONNECT2) {
397 struct l2cap_conn_rsp rsp;
398 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
399 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
401 if (l2cap_check_security(sk)) {
402 if (bt_sk(sk)->defer_setup) {
403 struct sock *parent = bt_sk(sk)->parent;
404 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
405 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
406 parent->sk_data_ready(parent, 0);
408 } else {
409 sk->sk_state = BT_CONFIG;
410 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
411 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
413 } else {
414 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
415 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
418 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
419 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
422 bh_unlock_sock(sk);
425 read_unlock(&l->lock);
428 static void l2cap_conn_ready(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
431 struct sock *sk;
433 BT_DBG("conn %p", conn);
435 read_lock(&l->lock);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 bh_lock_sock(sk);
440 if (sk->sk_type != SOCK_SEQPACKET) {
441 l2cap_sock_clear_timer(sk);
442 sk->sk_state = BT_CONNECTED;
443 sk->sk_state_change(sk);
444 } else if (sk->sk_state == BT_CONNECT)
445 l2cap_do_start(sk);
447 bh_unlock_sock(sk);
450 read_unlock(&l->lock);
453 /* Notify sockets that we cannot guarantee reliability anymore */
454 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
456 struct l2cap_chan_list *l = &conn->chan_list;
457 struct sock *sk;
459 BT_DBG("conn %p", conn);
461 read_lock(&l->lock);
463 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
464 if (l2cap_pi(sk)->force_reliable)
465 sk->sk_err = err;
468 read_unlock(&l->lock);
471 static void l2cap_info_timeout(unsigned long arg)
473 struct l2cap_conn *conn = (void *) arg;
475 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
476 conn->info_ident = 0;
478 l2cap_conn_start(conn);
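/* Allocate and initialise the L2CAP connection object for an ACL link,
 * or return the existing one. */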
481 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
483 struct l2cap_conn *conn = hcon->l2cap_data;
485 if (conn || status)
486 return conn;
488 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
489 if (!conn)
490 return NULL;
492 hcon->l2cap_data = conn;
493 conn->hcon = hcon;
495 BT_DBG("hcon %p conn %p", hcon, conn);
497 conn->mtu = hcon->hdev->acl_mtu;
498 conn->src = &hcon->hdev->bdaddr;
499 conn->dst = &hcon->dst;
501 conn->feat_mask = 0;
503 setup_timer(&conn->info_timer, l2cap_info_timeout,
504 (unsigned long) conn);
506 spin_lock_init(&conn->lock);
507 rwlock_init(&conn->chan_list.lock);
509 conn->disc_reason = 0x13;
511 return conn;
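/* Tear down the L2CAP connection attached to an ACL link: delete every
 * channel and free the connection object. */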
514 static void l2cap_conn_del(struct hci_conn *hcon, int err)
516 struct l2cap_conn *conn = hcon->l2cap_data;
517 struct sock *sk;
519 if (!conn)
520 return;
522 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
524 kfree_skb(conn->rx_skb);
526 /* Kill channels */
527 while ((sk = conn->chan_list.head)) {
528 bh_lock_sock(sk);
529 l2cap_chan_del(sk, err);
530 bh_unlock_sock(sk);
531 l2cap_sock_kill(sk);
534 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
535 del_timer_sync(&conn->info_timer);
537 hcon->l2cap_data = NULL;
538 kfree(conn);
541 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
543 struct l2cap_chan_list *l = &conn->chan_list;
544 write_lock_bh(&l->lock);
545 __l2cap_chan_add(conn, sk, parent);
546 write_unlock_bh(&l->lock);
549 /* ---- Socket interface ---- */
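/* Illustrative userspace sketch (not part of this file; assumes the BlueZ
 * userspace headers and a caller-supplied bdaddr_t remote_bdaddr): a
 * connection-oriented channel is a SEQPACKET socket connected with a
 * struct sockaddr_l2 carrying the PSM and bdaddr that the bind/connect
 * handlers below operate on, e.g.
 *
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	struct sockaddr_l2 addr = { 0 };
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);
 *	bacpy(&addr.l2_bdaddr, &remote_bdaddr);
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 */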
550 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
552 struct sock *sk;
553 struct hlist_node *node;
554 sk_for_each(sk, node, &l2cap_sk_list.head)
555 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
556 goto found;
557 sk = NULL;
558 found:
559 return sk;
562 /* Find socket with psm and source bdaddr.
563 * Returns closest match.
565 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
567 struct sock *sk = NULL, *sk1 = NULL;
568 struct hlist_node *node;
570 sk_for_each(sk, node, &l2cap_sk_list.head) {
571 if (state && sk->sk_state != state)
572 continue;
574 if (l2cap_pi(sk)->psm == psm) {
575 /* Exact match. */
576 if (!bacmp(&bt_sk(sk)->src, src))
577 break;
579 /* Closest match */
580 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
581 sk1 = sk;
584 return node ? sk : sk1;
587 /* Find socket with given address (psm, src).
588 * Returns locked socket */
589 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
591 struct sock *s;
592 read_lock(&l2cap_sk_list.lock);
593 s = __l2cap_get_sock_by_psm(state, psm, src);
594 if (s)
595 bh_lock_sock(s);
596 read_unlock(&l2cap_sk_list.lock);
597 return s;
600 static void l2cap_sock_destruct(struct sock *sk)
602 BT_DBG("sk %p", sk);
604 skb_queue_purge(&sk->sk_receive_queue);
605 skb_queue_purge(&sk->sk_write_queue);
608 static void l2cap_sock_cleanup_listen(struct sock *parent)
610 struct sock *sk;
612 BT_DBG("parent %p", parent);
614 /* Close not yet accepted channels */
615 while ((sk = bt_accept_dequeue(parent, NULL)))
616 l2cap_sock_close(sk);
618 parent->sk_state = BT_CLOSED;
619 sock_set_flag(parent, SOCK_ZAPPED);
622 /* Kill socket (only if zapped and orphan)
623 * Must be called on unlocked socket.
625 static void l2cap_sock_kill(struct sock *sk)
627 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
628 return;
630 BT_DBG("sk %p state %d", sk, sk->sk_state);
632 /* Kill poor orphan */
633 bt_sock_unlink(&l2cap_sk_list, sk);
634 sock_set_flag(sk, SOCK_DEAD);
635 sock_put(sk);
638 static void __l2cap_sock_close(struct sock *sk, int reason)
640 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
642 switch (sk->sk_state) {
643 case BT_LISTEN:
644 l2cap_sock_cleanup_listen(sk);
645 break;
647 case BT_CONNECTED:
648 case BT_CONFIG:
649 if (sk->sk_type == SOCK_SEQPACKET) {
650 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
651 struct l2cap_disconn_req req;
653 sk->sk_state = BT_DISCONN;
654 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
656 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
657 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
658 l2cap_send_cmd(conn, l2cap_get_ident(conn),
659 L2CAP_DISCONN_REQ, sizeof(req), &req);
660 } else
661 l2cap_chan_del(sk, reason);
662 break;
664 case BT_CONNECT2:
665 if (sk->sk_type == SOCK_SEQPACKET) {
666 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
667 struct l2cap_conn_rsp rsp;
668 __u16 result;
670 if (bt_sk(sk)->defer_setup)
671 result = L2CAP_CR_SEC_BLOCK;
672 else
673 result = L2CAP_CR_BAD_PSM;
675 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
676 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
677 rsp.result = cpu_to_le16(result);
678 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
679 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
680 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
681 } else
682 l2cap_chan_del(sk, reason);
683 break;
685 case BT_CONNECT:
686 case BT_DISCONN:
687 l2cap_chan_del(sk, reason);
688 break;
690 default:
691 sock_set_flag(sk, SOCK_ZAPPED);
692 break;
696 /* Must be called on unlocked socket. */
697 static void l2cap_sock_close(struct sock *sk)
699 l2cap_sock_clear_timer(sk);
700 lock_sock(sk);
701 __l2cap_sock_close(sk, ECONNRESET);
702 release_sock(sk);
703 l2cap_sock_kill(sk);
706 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
708 struct l2cap_pinfo *pi = l2cap_pi(sk);
710 BT_DBG("sk %p", sk);
712 if (parent) {
713 sk->sk_type = parent->sk_type;
714 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
716 pi->imtu = l2cap_pi(parent)->imtu;
717 pi->omtu = l2cap_pi(parent)->omtu;
718 pi->sec_level = l2cap_pi(parent)->sec_level;
719 pi->role_switch = l2cap_pi(parent)->role_switch;
720 pi->force_reliable = l2cap_pi(parent)->force_reliable;
721 } else {
722 pi->imtu = L2CAP_DEFAULT_MTU;
723 pi->omtu = 0;
724 pi->sec_level = BT_SECURITY_LOW;
725 pi->role_switch = 0;
726 pi->force_reliable = 0;
729 /* Default config options */
730 pi->conf_len = 0;
731 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
734 static struct proto l2cap_proto = {
735 .name = "L2CAP",
736 .owner = THIS_MODULE,
737 .obj_size = sizeof(struct l2cap_pinfo)
740 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
742 struct sock *sk;
744 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
745 if (!sk)
746 return NULL;
748 sock_init_data(sock, sk);
749 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
751 sk->sk_destruct = l2cap_sock_destruct;
752 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
754 sock_reset_flag(sk, SOCK_ZAPPED);
756 sk->sk_protocol = proto;
757 sk->sk_state = BT_OPEN;
759 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
761 bt_sock_link(&l2cap_sk_list, sk);
762 return sk;
765 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
767 struct sock *sk;
769 BT_DBG("sock %p", sock);
771 sock->state = SS_UNCONNECTED;
773 if (sock->type != SOCK_SEQPACKET &&
774 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
775 return -ESOCKTNOSUPPORT;
777 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
778 return -EPERM;
780 sock->ops = &l2cap_sock_ops;
782 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
783 if (!sk)
784 return -ENOMEM;
786 l2cap_sock_init(sk, NULL);
787 return 0;
790 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
792 struct sock *sk = sock->sk;
793 struct sockaddr_l2 la;
794 int len, err = 0;
796 BT_DBG("sk %p", sk);
798 if (!addr || addr->sa_family != AF_BLUETOOTH)
799 return -EINVAL;
801 memset(&la, 0, sizeof(la));
802 len = min_t(unsigned int, sizeof(la), alen);
803 memcpy(&la, addr, len);
805 if (la.l2_cid)
806 return -EINVAL;
808 lock_sock(sk);
810 if (sk->sk_state != BT_OPEN) {
811 err = -EBADFD;
812 goto done;
815 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
816 !capable(CAP_NET_BIND_SERVICE)) {
817 err = -EACCES;
818 goto done;
821 write_lock_bh(&l2cap_sk_list.lock);
823 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
824 err = -EADDRINUSE;
825 } else {
826 /* Save source address */
827 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
828 l2cap_pi(sk)->psm = la.l2_psm;
829 l2cap_pi(sk)->sport = la.l2_psm;
830 sk->sk_state = BT_BOUND;
832 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
833 __le16_to_cpu(la.l2_psm) == 0x0003)
834 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
837 write_unlock_bh(&l2cap_sk_list.lock);
839 done:
840 release_sock(sk);
841 return err;
844 static int l2cap_do_connect(struct sock *sk)
846 bdaddr_t *src = &bt_sk(sk)->src;
847 bdaddr_t *dst = &bt_sk(sk)->dst;
848 struct l2cap_conn *conn;
849 struct hci_conn *hcon;
850 struct hci_dev *hdev;
851 __u8 auth_type;
852 int err;
854 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
855 l2cap_pi(sk)->psm);
857 hdev = hci_get_route(dst, src);
858 if (!hdev)
859 return -EHOSTUNREACH;
861 hci_dev_lock_bh(hdev);
863 err = -ENOMEM;
865 if (sk->sk_type == SOCK_RAW) {
866 switch (l2cap_pi(sk)->sec_level) {
867 case BT_SECURITY_HIGH:
868 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
869 break;
870 case BT_SECURITY_MEDIUM:
871 auth_type = HCI_AT_DEDICATED_BONDING;
872 break;
873 default:
874 auth_type = HCI_AT_NO_BONDING;
875 break;
877 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
878 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
879 auth_type = HCI_AT_NO_BONDING_MITM;
880 else
881 auth_type = HCI_AT_NO_BONDING;
883 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
884 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
885 } else {
886 switch (l2cap_pi(sk)->sec_level) {
887 case BT_SECURITY_HIGH:
888 auth_type = HCI_AT_GENERAL_BONDING_MITM;
889 break;
890 case BT_SECURITY_MEDIUM:
891 auth_type = HCI_AT_GENERAL_BONDING;
892 break;
893 default:
894 auth_type = HCI_AT_NO_BONDING;
895 break;
899 hcon = hci_connect(hdev, ACL_LINK, dst,
900 l2cap_pi(sk)->sec_level, auth_type);
901 if (!hcon)
902 goto done;
904 conn = l2cap_conn_add(hcon, 0);
905 if (!conn) {
906 hci_conn_put(hcon);
907 goto done;
910 err = 0;
912 /* Update source addr of the socket */
913 bacpy(src, conn->src);
915 l2cap_chan_add(conn, sk, NULL);
917 sk->sk_state = BT_CONNECT;
918 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
920 if (hcon->state == BT_CONNECTED) {
921 if (sk->sk_type != SOCK_SEQPACKET) {
922 l2cap_sock_clear_timer(sk);
923 sk->sk_state = BT_CONNECTED;
924 } else
925 l2cap_do_start(sk);
928 done:
929 hci_dev_unlock_bh(hdev);
930 hci_dev_put(hdev);
931 return err;
934 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
936 struct sock *sk = sock->sk;
937 struct sockaddr_l2 la;
938 int len, err = 0;
940 BT_DBG("sk %p", sk);
942 if (!addr || addr->sa_family != AF_BLUETOOTH)
943 return -EINVAL;
945 memset(&la, 0, sizeof(la));
946 len = min_t(unsigned int, sizeof(la), alen);
947 memcpy(&la, addr, len);
949 if (la.l2_cid)
950 return -EINVAL;
952 lock_sock(sk);
954 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
955 err = -EINVAL;
956 goto done;
959 switch (sk->sk_state) {
960 case BT_CONNECT:
961 case BT_CONNECT2:
962 case BT_CONFIG:
963 /* Already connecting */
964 goto wait;
966 case BT_CONNECTED:
967 /* Already connected */
968 goto done;
970 case BT_OPEN:
971 case BT_BOUND:
972 /* Can connect */
973 break;
975 default:
976 err = -EBADFD;
977 goto done;
980 /* Set destination address and psm */
981 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
982 l2cap_pi(sk)->psm = la.l2_psm;
984 err = l2cap_do_connect(sk);
985 if (err)
986 goto done;
988 wait:
989 err = bt_sock_wait_state(sk, BT_CONNECTED,
990 sock_sndtimeo(sk, flags & O_NONBLOCK));
991 done:
992 release_sock(sk);
993 return err;
996 static int l2cap_sock_listen(struct socket *sock, int backlog)
998 struct sock *sk = sock->sk;
999 int err = 0;
1001 BT_DBG("sk %p backlog %d", sk, backlog);
1003 lock_sock(sk);
1005 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1006 err = -EBADFD;
1007 goto done;
1010 if (!l2cap_pi(sk)->psm) {
1011 bdaddr_t *src = &bt_sk(sk)->src;
1012 u16 psm;
1014 err = -EINVAL;
1016 write_lock_bh(&l2cap_sk_list.lock);
1018 for (psm = 0x1001; psm < 0x1100; psm += 2)
1019 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1020 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1021 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1022 err = 0;
1023 break;
1026 write_unlock_bh(&l2cap_sk_list.lock);
1028 if (err < 0)
1029 goto done;
1032 sk->sk_max_ack_backlog = backlog;
1033 sk->sk_ack_backlog = 0;
1034 sk->sk_state = BT_LISTEN;
1036 done:
1037 release_sock(sk);
1038 return err;
1041 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1043 DECLARE_WAITQUEUE(wait, current);
1044 struct sock *sk = sock->sk, *nsk;
1045 long timeo;
1046 int err = 0;
1048 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1050 if (sk->sk_state != BT_LISTEN) {
1051 err = -EBADFD;
1052 goto done;
1055 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1057 BT_DBG("sk %p timeo %ld", sk, timeo);
1059 /* Wait for an incoming connection. (wake-one). */
1060 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1061 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1062 set_current_state(TASK_INTERRUPTIBLE);
1063 if (!timeo) {
1064 err = -EAGAIN;
1065 break;
1068 release_sock(sk);
1069 timeo = schedule_timeout(timeo);
1070 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1072 if (sk->sk_state != BT_LISTEN) {
1073 err = -EBADFD;
1074 break;
1077 if (signal_pending(current)) {
1078 err = sock_intr_errno(timeo);
1079 break;
1082 set_current_state(TASK_RUNNING);
1083 remove_wait_queue(sk->sk_sleep, &wait);
1085 if (err)
1086 goto done;
1088 newsock->state = SS_CONNECTED;
1090 BT_DBG("new socket %p", nsk);
1092 done:
1093 release_sock(sk);
1094 return err;
1097 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1099 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1100 struct sock *sk = sock->sk;
1102 BT_DBG("sock %p, sk %p", sock, sk);
1104 addr->sa_family = AF_BLUETOOTH;
1105 *len = sizeof(struct sockaddr_l2);
1107 if (peer) {
1108 la->l2_psm = l2cap_pi(sk)->psm;
1109 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1110 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1111 } else {
1112 la->l2_psm = l2cap_pi(sk)->sport;
1113 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1114 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1117 return 0;
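/* Copy the user data from the msghdr into an skb chain: the first
 * fragment carries the L2CAP header (plus the PSM for connectionless
 * sockets), and the whole chain is handed to hci_send_acl(). */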
1120 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1122 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1123 struct sk_buff *skb, **frag;
1124 int err, hlen, count, sent = 0;
1125 struct l2cap_hdr *lh;
1127 BT_DBG("sk %p len %d", sk, len);
1129 /* First fragment (with L2CAP header) */
1130 if (sk->sk_type == SOCK_DGRAM)
1131 hlen = L2CAP_HDR_SIZE + 2;
1132 else
1133 hlen = L2CAP_HDR_SIZE;
1135 count = min_t(unsigned int, (conn->mtu - hlen), len);
1137 skb = bt_skb_send_alloc(sk, hlen + count,
1138 msg->msg_flags & MSG_DONTWAIT, &err);
1139 if (!skb)
1140 return err;
1142 /* Create L2CAP header */
1143 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1144 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1145 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1147 if (sk->sk_type == SOCK_DGRAM)
1148 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1150 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1151 err = -EFAULT;
1152 goto fail;
1155 sent += count;
1156 len -= count;
1158 /* Continuation fragments (no L2CAP header) */
1159 frag = &skb_shinfo(skb)->frag_list;
1160 while (len) {
1161 count = min_t(unsigned int, conn->mtu, len);
1163 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1164 if (!*frag)
1165 goto fail;
1167 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1168 err = -EFAULT;
1169 goto fail;
1172 sent += count;
1173 len -= count;
1175 frag = &(*frag)->next;
1177 err = hci_send_acl(conn->hcon, skb, 0);
1178 if (err < 0)
1179 goto fail;
1181 return sent;
1183 fail:
1184 kfree_skb(skb);
1185 return err;
1188 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1190 struct sock *sk = sock->sk;
1191 int err = 0;
1193 BT_DBG("sock %p, sk %p", sock, sk);
1195 err = sock_error(sk);
1196 if (err)
1197 return err;
1199 if (msg->msg_flags & MSG_OOB)
1200 return -EOPNOTSUPP;
1202 /* Check outgoing MTU */
1203 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1204 return -EINVAL;
1206 lock_sock(sk);
1208 if (sk->sk_state == BT_CONNECTED)
1209 err = l2cap_do_send(sk, msg, len);
1210 else
1211 err = -ENOTCONN;
1213 release_sock(sk);
1214 return err;
1217 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1219 struct sock *sk = sock->sk;
1221 lock_sock(sk);
1223 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1224 struct l2cap_conn_rsp rsp;
1226 sk->sk_state = BT_CONFIG;
1228 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1229 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1230 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1231 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1232 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1233 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1235 release_sock(sk);
1236 return 0;
1239 release_sock(sk);
1241 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1244 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1246 struct sock *sk = sock->sk;
1247 struct l2cap_options opts;
1248 int len, err = 0;
1249 u32 opt;
1251 BT_DBG("sk %p", sk);
1253 lock_sock(sk);
1255 switch (optname) {
1256 case L2CAP_OPTIONS:
1257 opts.imtu = l2cap_pi(sk)->imtu;
1258 opts.omtu = l2cap_pi(sk)->omtu;
1259 opts.flush_to = l2cap_pi(sk)->flush_to;
1260 opts.mode = L2CAP_MODE_BASIC;
1262 len = min_t(unsigned int, sizeof(opts), optlen);
1263 if (copy_from_user((char *) &opts, optval, len)) {
1264 err = -EFAULT;
1265 break;
1268 l2cap_pi(sk)->imtu = opts.imtu;
1269 l2cap_pi(sk)->omtu = opts.omtu;
1270 break;
1272 case L2CAP_LM:
1273 if (get_user(opt, (u32 __user *) optval)) {
1274 err = -EFAULT;
1275 break;
1278 if (opt & L2CAP_LM_AUTH)
1279 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1280 if (opt & L2CAP_LM_ENCRYPT)
1281 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1282 if (opt & L2CAP_LM_SECURE)
1283 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1285 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1286 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1287 break;
1289 default:
1290 err = -ENOPROTOOPT;
1291 break;
1294 release_sock(sk);
1295 return err;
1298 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1300 struct sock *sk = sock->sk;
1301 struct bt_security sec;
1302 int len, err = 0;
1303 u32 opt;
1305 BT_DBG("sk %p", sk);
1307 if (level == SOL_L2CAP)
1308 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1310 if (level != SOL_BLUETOOTH)
1311 return -ENOPROTOOPT;
1313 lock_sock(sk);
1315 switch (optname) {
1316 case BT_SECURITY:
1317 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1318 err = -EINVAL;
1319 break;
1322 sec.level = BT_SECURITY_LOW;
1324 len = min_t(unsigned int, sizeof(sec), optlen);
1325 if (copy_from_user((char *) &sec, optval, len)) {
1326 err = -EFAULT;
1327 break;
1330 if (sec.level < BT_SECURITY_LOW ||
1331 sec.level > BT_SECURITY_HIGH) {
1332 err = -EINVAL;
1333 break;
1336 l2cap_pi(sk)->sec_level = sec.level;
1337 break;
1339 case BT_DEFER_SETUP:
1340 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1341 err = -EINVAL;
1342 break;
1345 if (get_user(opt, (u32 __user *) optval)) {
1346 err = -EFAULT;
1347 break;
1350 bt_sk(sk)->defer_setup = opt;
1351 break;
1353 default:
1354 err = -ENOPROTOOPT;
1355 break;
1358 release_sock(sk);
1359 return err;
1362 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1364 struct sock *sk = sock->sk;
1365 struct l2cap_options opts;
1366 struct l2cap_conninfo cinfo;
1367 int len, err = 0;
1368 u32 opt;
1370 BT_DBG("sk %p", sk);
1372 if (get_user(len, optlen))
1373 return -EFAULT;
1375 lock_sock(sk);
1377 switch (optname) {
1378 case L2CAP_OPTIONS:
1379 opts.imtu = l2cap_pi(sk)->imtu;
1380 opts.omtu = l2cap_pi(sk)->omtu;
1381 opts.flush_to = l2cap_pi(sk)->flush_to;
1382 opts.mode = L2CAP_MODE_BASIC;
1384 len = min_t(unsigned int, len, sizeof(opts));
1385 if (copy_to_user(optval, (char *) &opts, len))
1386 err = -EFAULT;
1388 break;
1390 case L2CAP_LM:
1391 switch (l2cap_pi(sk)->sec_level) {
1392 case BT_SECURITY_LOW:
1393 opt = L2CAP_LM_AUTH;
1394 break;
1395 case BT_SECURITY_MEDIUM:
1396 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1397 break;
1398 case BT_SECURITY_HIGH:
1399 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1400 L2CAP_LM_SECURE;
1401 break;
1402 default:
1403 opt = 0;
1404 break;
1407 if (l2cap_pi(sk)->role_switch)
1408 opt |= L2CAP_LM_MASTER;
1410 if (l2cap_pi(sk)->force_reliable)
1411 opt |= L2CAP_LM_RELIABLE;
1413 if (put_user(opt, (u32 __user *) optval))
1414 err = -EFAULT;
1415 break;
1417 case L2CAP_CONNINFO:
1418 if (sk->sk_state != BT_CONNECTED &&
1419 !(sk->sk_state == BT_CONNECT2 &&
1420 bt_sk(sk)->defer_setup)) {
1421 err = -ENOTCONN;
1422 break;
1425 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1426 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1428 len = min_t(unsigned int, len, sizeof(cinfo));
1429 if (copy_to_user(optval, (char *) &cinfo, len))
1430 err = -EFAULT;
1432 break;
1434 default:
1435 err = -ENOPROTOOPT;
1436 break;
1439 release_sock(sk);
1440 return err;
1443 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1445 struct sock *sk = sock->sk;
1446 struct bt_security sec;
1447 int len, err = 0;
1449 BT_DBG("sk %p", sk);
1451 if (level == SOL_L2CAP)
1452 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1454 if (level != SOL_BLUETOOTH)
1455 return -ENOPROTOOPT;
1457 if (get_user(len, optlen))
1458 return -EFAULT;
1460 lock_sock(sk);
1462 switch (optname) {
1463 case BT_SECURITY:
1464 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1465 err = -EINVAL;
1466 break;
1469 sec.level = l2cap_pi(sk)->sec_level;
1471 len = min_t(unsigned int, len, sizeof(sec));
1472 if (copy_to_user(optval, (char *) &sec, len))
1473 err = -EFAULT;
1475 break;
1477 case BT_DEFER_SETUP:
1478 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1479 err = -EINVAL;
1480 break;
1483 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1484 err = -EFAULT;
1486 break;
1488 default:
1489 err = -ENOPROTOOPT;
1490 break;
1493 release_sock(sk);
1494 return err;
1497 static int l2cap_sock_shutdown(struct socket *sock, int how)
1499 struct sock *sk = sock->sk;
1500 int err = 0;
1502 BT_DBG("sock %p, sk %p", sock, sk);
1504 if (!sk)
1505 return 0;
1507 lock_sock(sk);
1508 if (!sk->sk_shutdown) {
1509 sk->sk_shutdown = SHUTDOWN_MASK;
1510 l2cap_sock_clear_timer(sk);
1511 __l2cap_sock_close(sk, 0);
1513 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1514 err = bt_sock_wait_state(sk, BT_CLOSED,
1515 sk->sk_lingertime);
1517 release_sock(sk);
1518 return err;
1521 static int l2cap_sock_release(struct socket *sock)
1523 struct sock *sk = sock->sk;
1524 int err;
1526 BT_DBG("sock %p, sk %p", sock, sk);
1528 if (!sk)
1529 return 0;
1531 err = l2cap_sock_shutdown(sock, 2);
1533 sock_orphan(sk);
1534 l2cap_sock_kill(sk);
1535 return err;
1538 static void l2cap_chan_ready(struct sock *sk)
1540 struct sock *parent = bt_sk(sk)->parent;
1542 BT_DBG("sk %p, parent %p", sk, parent);
1544 l2cap_pi(sk)->conf_state = 0;
1545 l2cap_sock_clear_timer(sk);
1547 if (!parent) {
1548 /* Outgoing channel.
1549 * Wake up socket sleeping on connect.
1551 sk->sk_state = BT_CONNECTED;
1552 sk->sk_state_change(sk);
1553 } else {
1554 /* Incoming channel.
1555 * Wake up socket sleeping on accept.
1557 parent->sk_data_ready(parent, 0);
1561 /* Copy frame to all raw sockets on that connection */
1562 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1564 struct l2cap_chan_list *l = &conn->chan_list;
1565 struct sk_buff *nskb;
1566 struct sock *sk;
1568 BT_DBG("conn %p", conn);
1570 read_lock(&l->lock);
1571 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1572 if (sk->sk_type != SOCK_RAW)
1573 continue;
1575 /* Don't send frame to the socket it came from */
1576 if (skb->sk == sk)
1577 continue;
1578 nskb = skb_clone(skb, GFP_ATOMIC);
1579 if (!nskb)
1580 continue;
1582 if (sock_queue_rcv_skb(sk, nskb))
1583 kfree_skb(nskb);
1585 read_unlock(&l->lock);
1588 /* ---- L2CAP signalling commands ---- */
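/* Build a signalling PDU (L2CAP header, command header, data) on the
 * signalling CID, fragmenting to the connection MTU when needed. */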
1589 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1590 u8 code, u8 ident, u16 dlen, void *data)
1592 struct sk_buff *skb, **frag;
1593 struct l2cap_cmd_hdr *cmd;
1594 struct l2cap_hdr *lh;
1595 int len, count;
1597 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1598 conn, code, ident, dlen);
1600 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1601 count = min_t(unsigned int, conn->mtu, len);
1603 skb = bt_skb_alloc(count, GFP_ATOMIC);
1604 if (!skb)
1605 return NULL;
1607 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1608 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1609 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1611 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1612 cmd->code = code;
1613 cmd->ident = ident;
1614 cmd->len = cpu_to_le16(dlen);
1616 if (dlen) {
1617 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1618 memcpy(skb_put(skb, count), data, count);
1619 data += count;
1622 len -= skb->len;
1624 /* Continuation fragments (no L2CAP header) */
1625 frag = &skb_shinfo(skb)->frag_list;
1626 while (len) {
1627 count = min_t(unsigned int, conn->mtu, len);
1629 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1630 if (!*frag)
1631 goto fail;
1633 memcpy(skb_put(*frag, count), data, count);
1635 len -= count;
1636 data += count;
1638 frag = &(*frag)->next;
1641 return skb;
1643 fail:
1644 kfree_skb(skb);
1645 return NULL;
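/* Decode one configuration option: report its type, length and value,
 * advance *ptr past it and return the number of bytes consumed. */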
1648 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1650 struct l2cap_conf_opt *opt = *ptr;
1651 int len;
1653 len = L2CAP_CONF_OPT_SIZE + opt->len;
1654 *ptr += len;
1656 *type = opt->type;
1657 *olen = opt->len;
1659 switch (opt->len) {
1660 case 1:
1661 *val = *((u8 *) opt->val);
1662 break;
1664 case 2:
1665 *val = __le16_to_cpu(*((__le16 *) opt->val));
1666 break;
1668 case 4:
1669 *val = __le32_to_cpu(*((__le32 *) opt->val));
1670 break;
1672 default:
1673 *val = (unsigned long) opt->val;
1674 break;
1677 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1678 return len;
1681 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1683 struct l2cap_conf_opt *opt = *ptr;
1685 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1687 opt->type = type;
1688 opt->len = len;
1690 switch (len) {
1691 case 1:
1692 *((u8 *) opt->val) = val;
1693 break;
1695 case 2:
1696 *((__le16 *) opt->val) = cpu_to_le16(val);
1697 break;
1699 case 4:
1700 *((__le32 *) opt->val) = cpu_to_le32(val);
1701 break;
1703 default:
1704 memcpy(opt->val, (void *) val, len);
1705 break;
1708 *ptr += L2CAP_CONF_OPT_SIZE + len;
1711 static int l2cap_build_conf_req(struct sock *sk, void *data)
1713 struct l2cap_pinfo *pi = l2cap_pi(sk);
1714 struct l2cap_conf_req *req = data;
1715 void *ptr = req->data;
1717 BT_DBG("sk %p", sk);
1719 if (pi->imtu != L2CAP_DEFAULT_MTU)
1720 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1722 /* FIXME: Need actual value of the flush timeout */
1723 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1724 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1726 req->dcid = cpu_to_le16(pi->dcid);
1727 req->flags = cpu_to_le16(0);
1729 return ptr - data;
1732 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1734 struct l2cap_pinfo *pi = l2cap_pi(sk);
1735 struct l2cap_conf_rsp *rsp = data;
1736 void *ptr = rsp->data;
1737 void *req = pi->conf_req;
1738 int len = pi->conf_len;
1739 int type, hint, olen;
1740 unsigned long val;
1741 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1742 u16 mtu = L2CAP_DEFAULT_MTU;
1743 u16 result = L2CAP_CONF_SUCCESS;
1745 BT_DBG("sk %p", sk);
1747 while (len >= L2CAP_CONF_OPT_SIZE) {
1748 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1750 hint = type & L2CAP_CONF_HINT;
1751 type &= L2CAP_CONF_MASK;
1753 switch (type) {
1754 case L2CAP_CONF_MTU:
1755 mtu = val;
1756 break;
1758 case L2CAP_CONF_FLUSH_TO:
1759 pi->flush_to = val;
1760 break;
1762 case L2CAP_CONF_QOS:
1763 break;
1765 case L2CAP_CONF_RFC:
1766 if (olen == sizeof(rfc))
1767 memcpy(&rfc, (void *) val, olen);
1768 break;
1770 default:
1771 if (hint)
1772 break;
1774 result = L2CAP_CONF_UNKNOWN;
1775 *((u8 *) ptr++) = type;
1776 break;
1780 if (result == L2CAP_CONF_SUCCESS) {
1781 /* Configure output options and let the other side know
1782 * which ones we don't like. */
1784 if (rfc.mode == L2CAP_MODE_BASIC) {
1785 if (mtu < pi->omtu)
1786 result = L2CAP_CONF_UNACCEPT;
1787 else {
1788 pi->omtu = mtu;
1789 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1792 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1793 } else {
1794 result = L2CAP_CONF_UNACCEPT;
1796 memset(&rfc, 0, sizeof(rfc));
1797 rfc.mode = L2CAP_MODE_BASIC;
1799 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1800 sizeof(rfc), (unsigned long) &rfc);
1804 rsp->scid = cpu_to_le16(pi->dcid);
1805 rsp->result = cpu_to_le16(result);
1806 rsp->flags = cpu_to_le16(0x0000);
1808 return ptr - data;
1811 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1813 struct l2cap_conf_rsp *rsp = data;
1814 void *ptr = rsp->data;
1816 BT_DBG("sk %p", sk);
1818 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1819 rsp->result = cpu_to_le16(result);
1820 rsp->flags = cpu_to_le16(flags);
1822 return ptr - data;
1825 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1827 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1829 if (rej->reason != 0x0000)
1830 return 0;
1832 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1833 cmd->ident == conn->info_ident) {
1834 del_timer(&conn->info_timer);
1836 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1837 conn->info_ident = 0;
1839 l2cap_conn_start(conn);
1842 return 0;
1845 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1847 struct l2cap_chan_list *list = &conn->chan_list;
1848 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1849 struct l2cap_conn_rsp rsp;
1850 struct sock *sk, *parent;
1851 int result, status = L2CAP_CS_NO_INFO;
1853 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1854 __le16 psm = req->psm;
1856 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1858 /* Check if we have socket listening on psm */
1859 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1860 if (!parent) {
1861 result = L2CAP_CR_BAD_PSM;
1862 goto sendresp;
1865 /* Check if the ACL is secure enough (if not SDP) */
1866 if (psm != cpu_to_le16(0x0001) &&
1867 !hci_conn_check_link_mode(conn->hcon)) {
1868 conn->disc_reason = 0x05;
1869 result = L2CAP_CR_SEC_BLOCK;
1870 goto response;
1873 result = L2CAP_CR_NO_MEM;
1875 /* Check for backlog size */
1876 if (sk_acceptq_is_full(parent)) {
1877 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1878 goto response;
1881 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1882 if (!sk)
1883 goto response;
1885 write_lock_bh(&list->lock);
1887 /* Check if we already have channel with that dcid */
1888 if (__l2cap_get_chan_by_dcid(list, scid)) {
1889 write_unlock_bh(&list->lock);
1890 sock_set_flag(sk, SOCK_ZAPPED);
1891 l2cap_sock_kill(sk);
1892 goto response;
1895 hci_conn_hold(conn->hcon);
1897 l2cap_sock_init(sk, parent);
1898 bacpy(&bt_sk(sk)->src, conn->src);
1899 bacpy(&bt_sk(sk)->dst, conn->dst);
1900 l2cap_pi(sk)->psm = psm;
1901 l2cap_pi(sk)->dcid = scid;
1903 __l2cap_chan_add(conn, sk, parent);
1904 dcid = l2cap_pi(sk)->scid;
1906 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1908 l2cap_pi(sk)->ident = cmd->ident;
1910 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1911 if (l2cap_check_security(sk)) {
1912 if (bt_sk(sk)->defer_setup) {
1913 sk->sk_state = BT_CONNECT2;
1914 result = L2CAP_CR_PEND;
1915 status = L2CAP_CS_AUTHOR_PEND;
1916 parent->sk_data_ready(parent, 0);
1917 } else {
1918 sk->sk_state = BT_CONFIG;
1919 result = L2CAP_CR_SUCCESS;
1920 status = L2CAP_CS_NO_INFO;
1922 } else {
1923 sk->sk_state = BT_CONNECT2;
1924 result = L2CAP_CR_PEND;
1925 status = L2CAP_CS_AUTHEN_PEND;
1927 } else {
1928 sk->sk_state = BT_CONNECT2;
1929 result = L2CAP_CR_PEND;
1930 status = L2CAP_CS_NO_INFO;
1933 write_unlock_bh(&list->lock);
1935 response:
1936 bh_unlock_sock(parent);
1938 sendresp:
1939 rsp.scid = cpu_to_le16(scid);
1940 rsp.dcid = cpu_to_le16(dcid);
1941 rsp.result = cpu_to_le16(result);
1942 rsp.status = cpu_to_le16(status);
1943 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1945 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1946 struct l2cap_info_req info;
1947 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1949 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1950 conn->info_ident = l2cap_get_ident(conn);
1952 mod_timer(&conn->info_timer, jiffies +
1953 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1955 l2cap_send_cmd(conn, conn->info_ident,
1956 L2CAP_INFO_REQ, sizeof(info), &info);
1959 return 0;
1962 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1964 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1965 u16 scid, dcid, result, status;
1966 struct sock *sk;
1967 u8 req[128];
1969 scid = __le16_to_cpu(rsp->scid);
1970 dcid = __le16_to_cpu(rsp->dcid);
1971 result = __le16_to_cpu(rsp->result);
1972 status = __le16_to_cpu(rsp->status);
1974 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1976 if (scid) {
1977 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
1978 if (!sk)
1979 return 0;
1980 } else {
1981 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
1982 if (!sk)
1983 return 0;
1986 switch (result) {
1987 case L2CAP_CR_SUCCESS:
1988 sk->sk_state = BT_CONFIG;
1989 l2cap_pi(sk)->ident = 0;
1990 l2cap_pi(sk)->dcid = dcid;
1991 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1993 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
1995 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1996 l2cap_build_conf_req(sk, req), req);
1997 break;
1999 case L2CAP_CR_PEND:
2000 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2001 break;
2003 default:
2004 l2cap_chan_del(sk, ECONNREFUSED);
2005 break;
2008 bh_unlock_sock(sk);
2009 return 0;
2012 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2014 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2015 u16 dcid, flags;
2016 u8 rsp[64];
2017 struct sock *sk;
2018 int len;
2020 dcid = __le16_to_cpu(req->dcid);
2021 flags = __le16_to_cpu(req->flags);
2023 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2025 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2026 if (!sk)
2027 return -ENOENT;
2029 if (sk->sk_state == BT_DISCONN)
2030 goto unlock;
2032 /* Reject if config buffer is too small. */
2033 len = cmd_len - sizeof(*req);
2034 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2035 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2036 l2cap_build_conf_rsp(sk, rsp,
2037 L2CAP_CONF_REJECT, flags), rsp);
2038 goto unlock;
2041 /* Store config. */
2042 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2043 l2cap_pi(sk)->conf_len += len;
2045 if (flags & 0x0001) {
2046 /* Incomplete config. Send empty response. */
2047 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2048 l2cap_build_conf_rsp(sk, rsp,
2049 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2050 goto unlock;
2053 /* Complete config. */
2054 len = l2cap_parse_conf_req(sk, rsp);
2055 if (len < 0)
2056 goto unlock;
2058 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2060 /* Reset config buffer. */
2061 l2cap_pi(sk)->conf_len = 0;
2063 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2064 goto unlock;
2066 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2067 sk->sk_state = BT_CONNECTED;
2068 l2cap_chan_ready(sk);
2069 goto unlock;
2072 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2073 u8 buf[64];
2074 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2075 l2cap_build_conf_req(sk, buf), buf);
2078 unlock:
2079 bh_unlock_sock(sk);
2080 return 0;
2083 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2085 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2086 u16 scid, flags, result;
2087 struct sock *sk;
2089 scid = __le16_to_cpu(rsp->scid);
2090 flags = __le16_to_cpu(rsp->flags);
2091 result = __le16_to_cpu(rsp->result);
2093 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2094 scid, flags, result);
2096 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2097 if (!sk)
2098 return 0;
2100 switch (result) {
2101 case L2CAP_CONF_SUCCESS:
2102 break;
2104 case L2CAP_CONF_UNACCEPT:
2105 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2106 char req[128];
2107 /* It does not make sense to adjust L2CAP parameters
2108 * that are currently defined in the spec. We simply
2109 * resend the config request that we sent earlier. It is
2110 * stupid, but it helps qualification testing, which
2111 * expects at least some response from us. */
2112 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2113 l2cap_build_conf_req(sk, req), req);
2114 goto done;
2117 default:
2118 sk->sk_state = BT_DISCONN;
2119 sk->sk_err = ECONNRESET;
2120 l2cap_sock_set_timer(sk, HZ * 5);
2122 struct l2cap_disconn_req req;
2123 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2124 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2125 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2126 L2CAP_DISCONN_REQ, sizeof(req), &req);
2128 goto done;
2131 if (flags & 0x01)
2132 goto done;
2134 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2136 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2137 sk->sk_state = BT_CONNECTED;
2138 l2cap_chan_ready(sk);
2141 done:
2142 bh_unlock_sock(sk);
2143 return 0;
2146 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2148 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2149 struct l2cap_disconn_rsp rsp;
2150 u16 dcid, scid;
2151 struct sock *sk;
2153 scid = __le16_to_cpu(req->scid);
2154 dcid = __le16_to_cpu(req->dcid);
2156 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2158 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2159 if (!sk)
2160 return 0;
2162 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2163 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2164 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2166 sk->sk_shutdown = SHUTDOWN_MASK;
2168 l2cap_chan_del(sk, ECONNRESET);
2169 bh_unlock_sock(sk);
2171 l2cap_sock_kill(sk);
2172 return 0;
2175 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2177 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2178 u16 dcid, scid;
2179 struct sock *sk;
2181 scid = __le16_to_cpu(rsp->scid);
2182 dcid = __le16_to_cpu(rsp->dcid);
2184 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2186 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2187 if (!sk)
2188 return 0;
2190 l2cap_chan_del(sk, 0);
2191 bh_unlock_sock(sk);
2193 l2cap_sock_kill(sk);
2194 return 0;
2197 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2199 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2200 u16 type;
2202 type = __le16_to_cpu(req->type);
2204 BT_DBG("type 0x%4.4x", type);
2206 if (type == L2CAP_IT_FEAT_MASK) {
2207 u8 buf[8];
2208 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2209 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2210 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2211 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
2212 l2cap_send_cmd(conn, cmd->ident,
2213 L2CAP_INFO_RSP, sizeof(buf), buf);
2214 } else if (type == L2CAP_IT_FIXED_CHAN) {
2215 u8 buf[12];
2216 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2217 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2218 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2219 memcpy(buf + 4, l2cap_fixed_chan, 8);
2220 l2cap_send_cmd(conn, cmd->ident,
2221 L2CAP_INFO_RSP, sizeof(buf), buf);
2222 } else {
2223 struct l2cap_info_rsp rsp;
2224 rsp.type = cpu_to_le16(type);
2225 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2226 l2cap_send_cmd(conn, cmd->ident,
2227 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2230 return 0;
2233 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2235 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2236 u16 type, result;
2238 type = __le16_to_cpu(rsp->type);
2239 result = __le16_to_cpu(rsp->result);
2241 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2243 del_timer(&conn->info_timer);
2245 if (type == L2CAP_IT_FEAT_MASK) {
2246 conn->feat_mask = get_unaligned_le32(rsp->data);
2248 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2249 struct l2cap_info_req req;
2250 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2252 conn->info_ident = l2cap_get_ident(conn);
2254 l2cap_send_cmd(conn, conn->info_ident,
2255 L2CAP_INFO_REQ, sizeof(req), &req);
2256 } else {
2257 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2258 conn->info_ident = 0;
2260 l2cap_conn_start(conn);
2262 } else if (type == L2CAP_IT_FIXED_CHAN) {
2263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2264 conn->info_ident = 0;
2266 l2cap_conn_start(conn);
2269 return 0;
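/* Handle a frame on the signalling channel: mirror it to raw sockets,
 * then parse and dispatch every command it carries, sending a Command
 * Reject for anything unknown. */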
2272 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2274 u8 *data = skb->data;
2275 int len = skb->len;
2276 struct l2cap_cmd_hdr cmd;
2277 int err = 0;
2279 l2cap_raw_recv(conn, skb);
2281 while (len >= L2CAP_CMD_HDR_SIZE) {
2282 u16 cmd_len;
2283 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2284 data += L2CAP_CMD_HDR_SIZE;
2285 len -= L2CAP_CMD_HDR_SIZE;
2287 cmd_len = le16_to_cpu(cmd.len);
2289 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2291 if (cmd_len > len || !cmd.ident) {
2292 BT_DBG("corrupted command");
2293 break;
2296 switch (cmd.code) {
2297 case L2CAP_COMMAND_REJ:
2298 l2cap_command_rej(conn, &cmd, data);
2299 break;
2301 case L2CAP_CONN_REQ:
2302 err = l2cap_connect_req(conn, &cmd, data);
2303 break;
2305 case L2CAP_CONN_RSP:
2306 err = l2cap_connect_rsp(conn, &cmd, data);
2307 break;
2309 case L2CAP_CONF_REQ:
2310 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2311 break;
2313 case L2CAP_CONF_RSP:
2314 err = l2cap_config_rsp(conn, &cmd, data);
2315 break;
2317 case L2CAP_DISCONN_REQ:
2318 err = l2cap_disconnect_req(conn, &cmd, data);
2319 break;
2321 case L2CAP_DISCONN_RSP:
2322 err = l2cap_disconnect_rsp(conn, &cmd, data);
2323 break;
2325 case L2CAP_ECHO_REQ:
2326 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2327 break;
2329 case L2CAP_ECHO_RSP:
2330 break;
2332 case L2CAP_INFO_REQ:
2333 err = l2cap_information_req(conn, &cmd, data);
2334 break;
2336 case L2CAP_INFO_RSP:
2337 err = l2cap_information_rsp(conn, &cmd, data);
2338 break;
2340 default:
2341 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2342 err = -EINVAL;
2343 break;
2346 if (err) {
2347 struct l2cap_cmd_rej rej;
2348 BT_DBG("error %d", err);
2350 /* FIXME: Map err to a valid reason */
2351 rej.reason = cpu_to_le16(0);
2352 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2355 data += cmd_len;
2356 len -= cmd_len;
2359 kfree_skb(skb);
2362 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2364 struct sock *sk;
2366 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2367 if (!sk) {
2368 BT_DBG("unknown cid 0x%4.4x", cid);
2369 goto drop;
2372 BT_DBG("sk %p, len %d", sk, skb->len);
2374 if (sk->sk_state != BT_CONNECTED)
2375 goto drop;
2377 if (l2cap_pi(sk)->imtu < skb->len)
2378 goto drop;
2380 /* If the socket receive buffer overflows we drop data here,
2381 * which is *bad* because L2CAP has to be reliable.
2382 * But we don't have any other choice: L2CAP doesn't
2383 * provide a flow control mechanism. */
2385 if (!sock_queue_rcv_skb(sk, skb))
2386 goto done;
2388 drop:
2389 kfree_skb(skb);
2391 done:
2392 if (sk)
2393 bh_unlock_sock(sk);
2395 return 0;
2398 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2400 struct sock *sk;
2402 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2403 if (!sk)
2404 goto drop;
2406 BT_DBG("sk %p, len %d", sk, skb->len);
2408 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2409 goto drop;
2411 if (l2cap_pi(sk)->imtu < skb->len)
2412 goto drop;
2414 if (!sock_queue_rcv_skb(sk, skb))
2415 goto done;
2417 drop:
2418 kfree_skb(skb);
2420 done:
2421 if (sk)
2422 bh_unlock_sock(sk);
2423 return 0;
2424 }
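/* Demultiplex a reassembled L2CAP frame by channel ID: signalling,
 * connectionless or connection-oriented data. */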
2426 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2427 {
2428 struct l2cap_hdr *lh = (void *) skb->data;
2429 u16 cid, len;
2430 __le16 psm;
2432 skb_pull(skb, L2CAP_HDR_SIZE);
2433 cid = __le16_to_cpu(lh->cid);
2434 len = __le16_to_cpu(lh->len);
2436 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2438 switch (cid) {
2439 case L2CAP_CID_SIGNALING:
2440 l2cap_sig_channel(conn, skb);
2441 break;
2443 case L2CAP_CID_CONN_LESS:
2444 psm = get_unaligned((__le16 *) skb->data);
2445 skb_pull(skb, 2);
2446 l2cap_conless_channel(conn, psm, skb);
2447 break;
2449 default:
2450 l2cap_data_channel(conn, cid, skb);
2451 break;
2452 }
2453 }
2455 /* ---- L2CAP interface with lower layer (HCI) ---- */
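/* Incoming ACL connection: tell HCI whether to accept it based on the
 * listening L2CAP sockets, preferring an exact local address match over
 * BDADDR_ANY, and ask for the master role if a matching socket has
 * role_switch set. */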
2457 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2458 {
2459 int exact = 0, lm1 = 0, lm2 = 0;
2460 register struct sock *sk;
2461 struct hlist_node *node;
2463 if (type != ACL_LINK)
2464 return 0;
2466 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2468 /* Find listening sockets and check their link_mode */
2469 read_lock(&l2cap_sk_list.lock);
2470 sk_for_each(sk, node, &l2cap_sk_list.head) {
2471 if (sk->sk_state != BT_LISTEN)
2472 continue;
2474 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2475 lm1 |= HCI_LM_ACCEPT;
2476 if (l2cap_pi(sk)->role_switch)
2477 lm1 |= HCI_LM_MASTER;
2478 exact++;
2479 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2480 lm2 |= HCI_LM_ACCEPT;
2481 if (l2cap_pi(sk)->role_switch)
2482 lm2 |= HCI_LM_MASTER;
2483 }
2484 }
2485 read_unlock(&l2cap_sk_list.lock);
2487 return exact ? lm1 : lm2;
2488 }
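/* Outgoing ACL connection completed: set up the L2CAP connection on
 * success, otherwise tear everything down with the translated HCI error. */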
2490 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2491 {
2492 struct l2cap_conn *conn;
2494 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2496 if (hcon->type != ACL_LINK)
2497 return 0;
2499 if (!status) {
2500 conn = l2cap_conn_add(hcon, status);
2501 if (conn)
2502 l2cap_conn_ready(conn);
2503 } else
2504 l2cap_conn_del(hcon, bt_err(status));
2506 return 0;
2507 }
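/* HCI asks which disconnect reason to use for this ACL link; fall back to
 * 0x13 (remote user terminated connection) when there is no L2CAP state. */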
2509 static int l2cap_disconn_ind(struct hci_conn *hcon)
2510 {
2511 struct l2cap_conn *conn = hcon->l2cap_data;
2513 BT_DBG("hcon %p", hcon);
2515 if (hcon->type != ACL_LINK || !conn)
2516 return 0x13;
2518 return conn->disc_reason;
2519 }
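/* ACL link went down: tear down the L2CAP connection and all of its
 * channels. */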
2521 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
2522 {
2523 BT_DBG("hcon %p reason %d", hcon, reason);
2525 if (hcon->type != ACL_LINK)
2526 return 0;
2528 l2cap_conn_del(hcon, bt_err(reason));
2530 return 0;
2531 }
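/* React to an encryption change on a connected channel: give medium
 * security channels a short grace timer when encryption is lost and close
 * high security channels right away. */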
2533 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2534 {
2535 if (sk->sk_type != SOCK_SEQPACKET)
2536 return;
2538 if (encrypt == 0x00) {
2539 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
2540 l2cap_sock_clear_timer(sk);
2541 l2cap_sock_set_timer(sk, HZ * 5);
2542 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2543 __l2cap_sock_close(sk, ECONNREFUSED);
2544 } else {
2545 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2546 l2cap_sock_clear_timer(sk);
2547 }
2548 }
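/* Security (authentication/encryption) procedure finished: resume the
 * channels that were waiting for it, either by sending the pending
 * Connection Request or by answering a pending incoming connection. */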
2550 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2551 {
2552 struct l2cap_chan_list *l;
2553 struct l2cap_conn *conn = hcon->l2cap_data;
2554 struct sock *sk;
2556 if (!conn)
2557 return 0;
2559 l = &conn->chan_list;
2561 BT_DBG("conn %p", conn);
2563 read_lock(&l->lock);
2565 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2566 bh_lock_sock(sk);
2568 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2569 bh_unlock_sock(sk);
2570 continue;
2571 }
2573 if (!status && (sk->sk_state == BT_CONNECTED ||
2574 sk->sk_state == BT_CONFIG)) {
2575 l2cap_check_encryption(sk, encrypt);
2576 bh_unlock_sock(sk);
2577 continue;
2578 }
2580 if (sk->sk_state == BT_CONNECT) {
2581 if (!status) {
2582 struct l2cap_conn_req req;
2583 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2584 req.psm = l2cap_pi(sk)->psm;
2586 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2588 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2589 L2CAP_CONN_REQ, sizeof(req), &req);
2590 } else {
2591 l2cap_sock_clear_timer(sk);
2592 l2cap_sock_set_timer(sk, HZ / 10);
2593 }
2594 } else if (sk->sk_state == BT_CONNECT2) {
2595 struct l2cap_conn_rsp rsp;
2596 __u16 result;
2598 if (!status) {
2599 sk->sk_state = BT_CONFIG;
2600 result = L2CAP_CR_SUCCESS;
2601 } else {
2602 sk->sk_state = BT_DISCONN;
2603 l2cap_sock_set_timer(sk, HZ / 10);
2604 result = L2CAP_CR_SEC_BLOCK;
2605 }
2607 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2608 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2609 rsp.result = cpu_to_le16(result);
2610 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2611 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2612 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2613 }
2615 bh_unlock_sock(sk);
2616 }
2618 read_unlock(&l->lock);
2620 return 0;
2621 }
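/* Reassemble incoming ACL data: a start fragment carries the L2CAP header
 * with the total frame length, continuation fragments are appended until
 * the frame is complete and can be handed to l2cap_recv_frame(). */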
2623 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2624 {
2625 struct l2cap_conn *conn = hcon->l2cap_data;
2627 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2628 goto drop;
2630 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2632 if (flags & ACL_START) {
2633 struct l2cap_hdr *hdr;
2634 int len;
2636 if (conn->rx_len) {
2637 BT_ERR("Unexpected start frame (len %d)", skb->len);
2638 kfree_skb(conn->rx_skb);
2639 conn->rx_skb = NULL;
2640 conn->rx_len = 0;
2641 l2cap_conn_unreliable(conn, ECOMM);
2642 }
2644 if (skb->len < 2) {
2645 BT_ERR("Frame is too short (len %d)", skb->len);
2646 l2cap_conn_unreliable(conn, ECOMM);
2647 goto drop;
2648 }
2650 hdr = (struct l2cap_hdr *) skb->data;
2651 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2653 if (len == skb->len) {
2654 /* Complete frame received */
2655 l2cap_recv_frame(conn, skb);
2656 return 0;
2657 }
2659 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2661 if (skb->len > len) {
2662 BT_ERR("Frame is too long (len %d, expected len %d)",
2663 skb->len, len);
2664 l2cap_conn_unreliable(conn, ECOMM);
2665 goto drop;
2666 }
2668 /* Allocate skb for the complete frame (with header) */
2669 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
2670 if (!conn->rx_skb)
2671 goto drop;
2673 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2674 skb->len);
2675 conn->rx_len = len - skb->len;
2676 } else {
2677 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2679 if (!conn->rx_len) {
2680 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2681 l2cap_conn_unreliable(conn, ECOMM);
2682 goto drop;
2683 }
2685 if (skb->len > conn->rx_len) {
2686 BT_ERR("Fragment is too long (len %d, expected %d)",
2687 skb->len, conn->rx_len);
2688 kfree_skb(conn->rx_skb);
2689 conn->rx_skb = NULL;
2690 conn->rx_len = 0;
2691 l2cap_conn_unreliable(conn, ECOMM);
2692 goto drop;
2693 }
2695 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2696 skb->len);
2697 conn->rx_len -= skb->len;
2699 if (!conn->rx_len) {
2700 /* Complete frame received */
2701 l2cap_recv_frame(conn, conn->rx_skb);
2702 conn->rx_skb = NULL;
2703 }
2704 }
2706 drop:
2707 kfree_skb(skb);
2708 return 0;
2709 }
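/* Dump one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) for the "l2cap" class attribute in sysfs. */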
2711 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2712 {
2713 struct sock *sk;
2714 struct hlist_node *node;
2715 char *str = buf;
2717 read_lock_bh(&l2cap_sk_list.lock);
2719 sk_for_each(sk, node, &l2cap_sk_list.head) {
2720 struct l2cap_pinfo *pi = l2cap_pi(sk);
2722 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2723 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2724 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
2725 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
2726 }
2728 read_unlock_bh(&l2cap_sk_list.lock);
2730 return str - buf;
2731 }
2733 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2735 static const struct proto_ops l2cap_sock_ops = {
2736 .family = PF_BLUETOOTH,
2737 .owner = THIS_MODULE,
2738 .release = l2cap_sock_release,
2739 .bind = l2cap_sock_bind,
2740 .connect = l2cap_sock_connect,
2741 .listen = l2cap_sock_listen,
2742 .accept = l2cap_sock_accept,
2743 .getname = l2cap_sock_getname,
2744 .sendmsg = l2cap_sock_sendmsg,
2745 .recvmsg = l2cap_sock_recvmsg,
2746 .poll = bt_sock_poll,
2747 .ioctl = bt_sock_ioctl,
2748 .mmap = sock_no_mmap,
2749 .socketpair = sock_no_socketpair,
2750 .shutdown = l2cap_sock_shutdown,
2751 .setsockopt = l2cap_sock_setsockopt,
2752 .getsockopt = l2cap_sock_getsockopt
2753 };
2755 static struct net_proto_family l2cap_sock_family_ops = {
2756 .family = PF_BLUETOOTH,
2757 .owner = THIS_MODULE,
2758 .create = l2cap_sock_create,
2759 };
2761 static struct hci_proto l2cap_hci_proto = {
2762 .name = "L2CAP",
2763 .id = HCI_PROTO_L2CAP,
2764 .connect_ind = l2cap_connect_ind,
2765 .connect_cfm = l2cap_connect_cfm,
2766 .disconn_ind = l2cap_disconn_ind,
2767 .disconn_cfm = l2cap_disconn_cfm,
2768 .security_cfm = l2cap_security_cfm,
2769 .recv_acldata = l2cap_recv_acldata
2770 };
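/* Register the L2CAP protocol: the socket protocol itself, the
 * PF_BLUETOOTH/BTPROTO_L2CAP socket family, the HCI protocol hooks and
 * the optional sysfs info file. */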
2772 static int __init l2cap_init(void)
2773 {
2774 int err;
2776 err = proto_register(&l2cap_proto, 0);
2777 if (err < 0)
2778 return err;
2780 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2781 if (err < 0) {
2782 BT_ERR("L2CAP socket registration failed");
2783 goto error;
2784 }
2786 err = hci_register_proto(&l2cap_hci_proto);
2787 if (err < 0) {
2788 BT_ERR("L2CAP protocol registration failed");
2789 bt_sock_unregister(BTPROTO_L2CAP);
2790 goto error;
2791 }
2793 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2794 BT_ERR("Failed to create L2CAP info file");
2796 BT_INFO("L2CAP ver %s", VERSION);
2797 BT_INFO("L2CAP socket layer initialized");
2799 return 0;
2801 error:
2802 proto_unregister(&l2cap_proto);
2803 return err;
2804 }
2806 static void __exit l2cap_exit(void)
2807 {
2808 class_remove_file(bt_class, &class_attr_l2cap);
2810 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2811 BT_ERR("L2CAP socket unregistration failed");
2813 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2814 BT_ERR("L2CAP protocol unregistration failed");
2816 proto_unregister(&l2cap_proto);
2817 }
2819 void l2cap_load(void)
2820 {
2821 /* Dummy function to trigger automatic L2CAP module loading by
2822 * other modules that use L2CAP sockets but don't use any other
2823 * symbols from it. */
2824 return;
2825 }
2826 EXPORT_SYMBOL(l2cap_load);
2828 module_init(l2cap_init);
2829 module_exit(l2cap_exit);
2831 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2832 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2833 MODULE_VERSION(VERSION);
2834 MODULE_LICENSE("GPL");
2835 MODULE_ALIAS("bt-proto-0");