eCryptfs: Check for O_RDONLY lower inodes when opening lower files
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blobb03012564647dc21216059e4ffbcacb78da7389a
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm = 0;
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* Socket timer fired: close the channel with an error code chosen from
 * the current connection state, then drop the timer's socket reference. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill must run on the unlocked socket. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
120 return s;
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
130 return s;
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s)
141 bh_lock_sock(s);
142 read_unlock(&l->lock);
143 return s;
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 struct sock *s;
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
151 break;
153 return s;
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 struct sock *s;
159 read_lock(&l->lock);
160 s = __l2cap_get_chan_by_ident(l, ident);
161 if (s)
162 bh_lock_sock(s);
163 read_unlock(&l->lock);
164 return s;
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
173 return cid;
176 return 0;
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
181 sock_hold(sk);
183 if (l->head)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
188 l->head = sk;
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
196 if (sk == l->head)
197 l->head = next;
199 if (next)
200 l2cap_pi(next)->prev_c = prev;
201 if (prev)
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
205 __sock_put(sk);
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 } else {
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
236 if (parent)
237 bt_accept_enqueue(parent, sk);
240 /* Delete channel.
241 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 if (conn) {
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
261 if (err)
262 sk->sk_err = err;
264 if (parent) {
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
267 } else
268 sk->sk_state_change(sk);
271 /* Service level security */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
275 __u8 auth_type;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
280 else
281 auth_type = HCI_AT_NO_BONDING;
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 } else {
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 break;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
292 break;
293 default:
294 auth_type = HCI_AT_NO_BONDING;
295 break;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
300 auth_type);
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
305 u8 id;
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
318 id = conn->tx_ident;
320 spin_unlock_bh(&conn->lock);
322 return id;
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
331 if (!skb)
332 return -ENOMEM;
334 return hci_send_acl(conn->hcon, skb, 0);
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339 struct sk_buff *skb;
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
344 if (pi->fcs == L2CAP_FCS_CRC16)
345 hlen += 2;
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
353 if (!skb)
354 return -ENOMEM;
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
361 if (pi->fcs == L2CAP_FCS_CRC16) {
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
373 else
374 control |= L2CAP_SUPER_RCV_READY;
376 return l2cap_send_sframe(pi, control);
379 static void l2cap_do_start(struct sock *sk)
381 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
383 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
384 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
385 return;
387 if (l2cap_check_security(sk)) {
388 struct l2cap_conn_req req;
389 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
390 req.psm = l2cap_pi(sk)->psm;
392 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
394 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
395 L2CAP_CONN_REQ, sizeof(req), &req);
397 } else {
398 struct l2cap_info_req req;
399 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
402 conn->info_ident = l2cap_get_ident(conn);
404 mod_timer(&conn->info_timer, jiffies +
405 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
407 l2cap_send_cmd(conn, conn->info_ident,
408 L2CAP_INFO_REQ, sizeof(req), &req);
412 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
414 struct l2cap_disconn_req req;
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
422 /* ---- L2CAP connections ---- */
423 static void l2cap_conn_start(struct l2cap_conn *conn)
425 struct l2cap_chan_list *l = &conn->chan_list;
426 struct sock *sk;
428 BT_DBG("conn %p", conn);
430 read_lock(&l->lock);
432 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
433 bh_lock_sock(sk);
435 if (sk->sk_type != SOCK_SEQPACKET) {
436 bh_unlock_sock(sk);
437 continue;
440 if (sk->sk_state == BT_CONNECT) {
441 if (l2cap_check_security(sk)) {
442 struct l2cap_conn_req req;
443 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
444 req.psm = l2cap_pi(sk)->psm;
446 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
448 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
449 L2CAP_CONN_REQ, sizeof(req), &req);
451 } else if (sk->sk_state == BT_CONNECT2) {
452 struct l2cap_conn_rsp rsp;
453 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
454 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
456 if (l2cap_check_security(sk)) {
457 if (bt_sk(sk)->defer_setup) {
458 struct sock *parent = bt_sk(sk)->parent;
459 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
460 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
461 parent->sk_data_ready(parent, 0);
463 } else {
464 sk->sk_state = BT_CONFIG;
465 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
466 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
468 } else {
469 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
470 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
473 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
474 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
477 bh_unlock_sock(sk);
480 read_unlock(&l->lock);
483 static void l2cap_conn_ready(struct l2cap_conn *conn)
485 struct l2cap_chan_list *l = &conn->chan_list;
486 struct sock *sk;
488 BT_DBG("conn %p", conn);
490 read_lock(&l->lock);
492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
493 bh_lock_sock(sk);
495 if (sk->sk_type != SOCK_SEQPACKET) {
496 l2cap_sock_clear_timer(sk);
497 sk->sk_state = BT_CONNECTED;
498 sk->sk_state_change(sk);
499 } else if (sk->sk_state == BT_CONNECT)
500 l2cap_do_start(sk);
502 bh_unlock_sock(sk);
505 read_unlock(&l->lock);
508 /* Notify sockets that we cannot guaranty reliability anymore */
509 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
511 struct l2cap_chan_list *l = &conn->chan_list;
512 struct sock *sk;
514 BT_DBG("conn %p", conn);
516 read_lock(&l->lock);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 if (l2cap_pi(sk)->force_reliable)
520 sk->sk_err = err;
523 read_unlock(&l->lock);
526 static void l2cap_info_timeout(unsigned long arg)
528 struct l2cap_conn *conn = (void *) arg;
530 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
531 conn->info_ident = 0;
533 l2cap_conn_start(conn);
536 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
538 struct l2cap_conn *conn = hcon->l2cap_data;
540 if (conn || status)
541 return conn;
543 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
544 if (!conn)
545 return NULL;
547 hcon->l2cap_data = conn;
548 conn->hcon = hcon;
550 BT_DBG("hcon %p conn %p", hcon, conn);
552 conn->mtu = hcon->hdev->acl_mtu;
553 conn->src = &hcon->hdev->bdaddr;
554 conn->dst = &hcon->dst;
556 conn->feat_mask = 0;
558 setup_timer(&conn->info_timer, l2cap_info_timeout,
559 (unsigned long) conn);
561 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock);
564 conn->disc_reason = 0x13;
566 return conn;
569 static void l2cap_conn_del(struct hci_conn *hcon, int err)
571 struct l2cap_conn *conn = hcon->l2cap_data;
572 struct sock *sk;
574 if (!conn)
575 return;
577 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
579 kfree_skb(conn->rx_skb);
581 /* Kill channels */
582 while ((sk = conn->chan_list.head)) {
583 bh_lock_sock(sk);
584 l2cap_chan_del(sk, err);
585 bh_unlock_sock(sk);
586 l2cap_sock_kill(sk);
589 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
590 del_timer_sync(&conn->info_timer);
592 hcon->l2cap_data = NULL;
593 kfree(conn);
596 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
598 struct l2cap_chan_list *l = &conn->chan_list;
599 write_lock_bh(&l->lock);
600 __l2cap_chan_add(conn, sk, parent);
601 write_unlock_bh(&l->lock);
604 /* ---- Socket interface ---- */
605 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
607 struct sock *sk;
608 struct hlist_node *node;
609 sk_for_each(sk, node, &l2cap_sk_list.head)
610 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
611 goto found;
612 sk = NULL;
613 found:
614 return sk;
617 /* Find socket with psm and source bdaddr.
618 * Returns closest match.
620 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
622 struct sock *sk = NULL, *sk1 = NULL;
623 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head) {
626 if (state && sk->sk_state != state)
627 continue;
629 if (l2cap_pi(sk)->psm == psm) {
630 /* Exact match. */
631 if (!bacmp(&bt_sk(sk)->src, src))
632 break;
634 /* Closest match */
635 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
636 sk1 = sk;
639 return node ? sk : sk1;
642 /* Find socket with given address (psm, src).
643 * Returns locked socket */
644 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
646 struct sock *s;
647 read_lock(&l2cap_sk_list.lock);
648 s = __l2cap_get_sock_by_psm(state, psm, src);
649 if (s)
650 bh_lock_sock(s);
651 read_unlock(&l2cap_sk_list.lock);
652 return s;
655 static void l2cap_sock_destruct(struct sock *sk)
657 BT_DBG("sk %p", sk);
659 skb_queue_purge(&sk->sk_receive_queue);
660 skb_queue_purge(&sk->sk_write_queue);
663 static void l2cap_sock_cleanup_listen(struct sock *parent)
665 struct sock *sk;
667 BT_DBG("parent %p", parent);
669 /* Close not yet accepted channels */
670 while ((sk = bt_accept_dequeue(parent, NULL)))
671 l2cap_sock_close(sk);
673 parent->sk_state = BT_CLOSED;
674 sock_set_flag(parent, SOCK_ZAPPED);
677 /* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
680 static void l2cap_sock_kill(struct sock *sk)
682 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
683 return;
685 BT_DBG("sk %p state %d", sk, sk->sk_state);
687 /* Kill poor orphan */
688 bt_sock_unlink(&l2cap_sk_list, sk);
689 sock_set_flag(sk, SOCK_DEAD);
690 sock_put(sk);
693 static void __l2cap_sock_close(struct sock *sk, int reason)
695 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
697 switch (sk->sk_state) {
698 case BT_LISTEN:
699 l2cap_sock_cleanup_listen(sk);
700 break;
702 case BT_CONNECTED:
703 case BT_CONFIG:
704 if (sk->sk_type == SOCK_SEQPACKET) {
705 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
707 sk->sk_state = BT_DISCONN;
708 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
709 l2cap_send_disconn_req(conn, sk);
710 } else
711 l2cap_chan_del(sk, reason);
712 break;
714 case BT_CONNECT2:
715 if (sk->sk_type == SOCK_SEQPACKET) {
716 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
717 struct l2cap_conn_rsp rsp;
718 __u16 result;
720 if (bt_sk(sk)->defer_setup)
721 result = L2CAP_CR_SEC_BLOCK;
722 else
723 result = L2CAP_CR_BAD_PSM;
725 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
726 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
727 rsp.result = cpu_to_le16(result);
728 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
729 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
730 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
731 } else
732 l2cap_chan_del(sk, reason);
733 break;
735 case BT_CONNECT:
736 case BT_DISCONN:
737 l2cap_chan_del(sk, reason);
738 break;
740 default:
741 sock_set_flag(sk, SOCK_ZAPPED);
742 break;
746 /* Must be called on unlocked socket. */
747 static void l2cap_sock_close(struct sock *sk)
749 l2cap_sock_clear_timer(sk);
750 lock_sock(sk);
751 __l2cap_sock_close(sk, ECONNRESET);
752 release_sock(sk);
753 l2cap_sock_kill(sk);
756 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
758 struct l2cap_pinfo *pi = l2cap_pi(sk);
760 BT_DBG("sk %p", sk);
762 if (parent) {
763 sk->sk_type = parent->sk_type;
764 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
766 pi->imtu = l2cap_pi(parent)->imtu;
767 pi->omtu = l2cap_pi(parent)->omtu;
768 pi->mode = l2cap_pi(parent)->mode;
769 pi->fcs = l2cap_pi(parent)->fcs;
770 pi->sec_level = l2cap_pi(parent)->sec_level;
771 pi->role_switch = l2cap_pi(parent)->role_switch;
772 pi->force_reliable = l2cap_pi(parent)->force_reliable;
773 } else {
774 pi->imtu = L2CAP_DEFAULT_MTU;
775 pi->omtu = 0;
776 pi->mode = L2CAP_MODE_BASIC;
777 pi->fcs = L2CAP_FCS_CRC16;
778 pi->sec_level = BT_SECURITY_LOW;
779 pi->role_switch = 0;
780 pi->force_reliable = 0;
783 /* Default config options */
784 pi->conf_len = 0;
785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
788 static struct proto l2cap_proto = {
789 .name = "L2CAP",
790 .owner = THIS_MODULE,
791 .obj_size = sizeof(struct l2cap_pinfo)
794 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
796 struct sock *sk;
798 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
799 if (!sk)
800 return NULL;
802 sock_init_data(sock, sk);
803 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
805 sk->sk_destruct = l2cap_sock_destruct;
806 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
808 sock_reset_flag(sk, SOCK_ZAPPED);
810 sk->sk_protocol = proto;
811 sk->sk_state = BT_OPEN;
813 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
815 bt_sock_link(&l2cap_sk_list, sk);
816 return sk;
819 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
821 struct sock *sk;
823 BT_DBG("sock %p", sock);
825 sock->state = SS_UNCONNECTED;
827 if (sock->type != SOCK_SEQPACKET &&
828 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
829 return -ESOCKTNOSUPPORT;
831 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
832 return -EPERM;
834 sock->ops = &l2cap_sock_ops;
836 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
837 if (!sk)
838 return -ENOMEM;
840 l2cap_sock_init(sk, NULL);
841 return 0;
844 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
846 struct sock *sk = sock->sk;
847 struct sockaddr_l2 la;
848 int len, err = 0;
850 BT_DBG("sk %p", sk);
852 if (!addr || addr->sa_family != AF_BLUETOOTH)
853 return -EINVAL;
855 memset(&la, 0, sizeof(la));
856 len = min_t(unsigned int, sizeof(la), alen);
857 memcpy(&la, addr, len);
859 if (la.l2_cid)
860 return -EINVAL;
862 lock_sock(sk);
864 if (sk->sk_state != BT_OPEN) {
865 err = -EBADFD;
866 goto done;
869 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
870 !capable(CAP_NET_BIND_SERVICE)) {
871 err = -EACCES;
872 goto done;
875 write_lock_bh(&l2cap_sk_list.lock);
877 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
878 err = -EADDRINUSE;
879 } else {
880 /* Save source address */
881 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
882 l2cap_pi(sk)->psm = la.l2_psm;
883 l2cap_pi(sk)->sport = la.l2_psm;
884 sk->sk_state = BT_BOUND;
886 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
887 __le16_to_cpu(la.l2_psm) == 0x0003)
888 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
891 write_unlock_bh(&l2cap_sk_list.lock);
893 done:
894 release_sock(sk);
895 return err;
898 static int l2cap_do_connect(struct sock *sk)
900 bdaddr_t *src = &bt_sk(sk)->src;
901 bdaddr_t *dst = &bt_sk(sk)->dst;
902 struct l2cap_conn *conn;
903 struct hci_conn *hcon;
904 struct hci_dev *hdev;
905 __u8 auth_type;
906 int err;
908 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
909 l2cap_pi(sk)->psm);
911 hdev = hci_get_route(dst, src);
912 if (!hdev)
913 return -EHOSTUNREACH;
915 hci_dev_lock_bh(hdev);
917 err = -ENOMEM;
919 if (sk->sk_type == SOCK_RAW) {
920 switch (l2cap_pi(sk)->sec_level) {
921 case BT_SECURITY_HIGH:
922 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
923 break;
924 case BT_SECURITY_MEDIUM:
925 auth_type = HCI_AT_DEDICATED_BONDING;
926 break;
927 default:
928 auth_type = HCI_AT_NO_BONDING;
929 break;
931 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
932 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
933 auth_type = HCI_AT_NO_BONDING_MITM;
934 else
935 auth_type = HCI_AT_NO_BONDING;
937 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
938 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
939 } else {
940 switch (l2cap_pi(sk)->sec_level) {
941 case BT_SECURITY_HIGH:
942 auth_type = HCI_AT_GENERAL_BONDING_MITM;
943 break;
944 case BT_SECURITY_MEDIUM:
945 auth_type = HCI_AT_GENERAL_BONDING;
946 break;
947 default:
948 auth_type = HCI_AT_NO_BONDING;
949 break;
953 hcon = hci_connect(hdev, ACL_LINK, dst,
954 l2cap_pi(sk)->sec_level, auth_type);
955 if (!hcon)
956 goto done;
958 conn = l2cap_conn_add(hcon, 0);
959 if (!conn) {
960 hci_conn_put(hcon);
961 goto done;
964 err = 0;
966 /* Update source addr of the socket */
967 bacpy(src, conn->src);
969 l2cap_chan_add(conn, sk, NULL);
971 sk->sk_state = BT_CONNECT;
972 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
974 if (hcon->state == BT_CONNECTED) {
975 if (sk->sk_type != SOCK_SEQPACKET) {
976 l2cap_sock_clear_timer(sk);
977 sk->sk_state = BT_CONNECTED;
978 } else
979 l2cap_do_start(sk);
982 done:
983 hci_dev_unlock_bh(hdev);
984 hci_dev_put(hdev);
985 return err;
988 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
990 struct sock *sk = sock->sk;
991 struct sockaddr_l2 la;
992 int len, err = 0;
994 BT_DBG("sk %p", sk);
996 if (!addr || addr->sa_family != AF_BLUETOOTH)
997 return -EINVAL;
999 memset(&la, 0, sizeof(la));
1000 len = min_t(unsigned int, sizeof(la), alen);
1001 memcpy(&la, addr, len);
1003 if (la.l2_cid)
1004 return -EINVAL;
1006 lock_sock(sk);
1008 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1009 err = -EINVAL;
1010 goto done;
1013 switch (l2cap_pi(sk)->mode) {
1014 case L2CAP_MODE_BASIC:
1015 break;
1016 case L2CAP_MODE_ERTM:
1017 case L2CAP_MODE_STREAMING:
1018 if (enable_ertm)
1019 break;
1020 /* fall through */
1021 default:
1022 err = -ENOTSUPP;
1023 goto done;
1026 switch (sk->sk_state) {
1027 case BT_CONNECT:
1028 case BT_CONNECT2:
1029 case BT_CONFIG:
1030 /* Already connecting */
1031 goto wait;
1033 case BT_CONNECTED:
1034 /* Already connected */
1035 goto done;
1037 case BT_OPEN:
1038 case BT_BOUND:
1039 /* Can connect */
1040 break;
1042 default:
1043 err = -EBADFD;
1044 goto done;
1047 /* Set destination address and psm */
1048 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1049 l2cap_pi(sk)->psm = la.l2_psm;
1051 err = l2cap_do_connect(sk);
1052 if (err)
1053 goto done;
1055 wait:
1056 err = bt_sock_wait_state(sk, BT_CONNECTED,
1057 sock_sndtimeo(sk, flags & O_NONBLOCK));
1058 done:
1059 release_sock(sk);
1060 return err;
1063 static int l2cap_sock_listen(struct socket *sock, int backlog)
1065 struct sock *sk = sock->sk;
1066 int err = 0;
1068 BT_DBG("sk %p backlog %d", sk, backlog);
1070 lock_sock(sk);
1072 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1073 err = -EBADFD;
1074 goto done;
1077 switch (l2cap_pi(sk)->mode) {
1078 case L2CAP_MODE_BASIC:
1079 break;
1080 case L2CAP_MODE_ERTM:
1081 case L2CAP_MODE_STREAMING:
1082 if (enable_ertm)
1083 break;
1084 /* fall through */
1085 default:
1086 err = -ENOTSUPP;
1087 goto done;
1090 if (!l2cap_pi(sk)->psm) {
1091 bdaddr_t *src = &bt_sk(sk)->src;
1092 u16 psm;
1094 err = -EINVAL;
1096 write_lock_bh(&l2cap_sk_list.lock);
1098 for (psm = 0x1001; psm < 0x1100; psm += 2)
1099 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1100 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1101 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1102 err = 0;
1103 break;
1106 write_unlock_bh(&l2cap_sk_list.lock);
1108 if (err < 0)
1109 goto done;
1112 sk->sk_max_ack_backlog = backlog;
1113 sk->sk_ack_backlog = 0;
1114 sk->sk_state = BT_LISTEN;
1116 done:
1117 release_sock(sk);
1118 return err;
1121 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1123 DECLARE_WAITQUEUE(wait, current);
1124 struct sock *sk = sock->sk, *nsk;
1125 long timeo;
1126 int err = 0;
1128 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1130 if (sk->sk_state != BT_LISTEN) {
1131 err = -EBADFD;
1132 goto done;
1135 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1137 BT_DBG("sk %p timeo %ld", sk, timeo);
1139 /* Wait for an incoming connection. (wake-one). */
1140 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1141 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1142 set_current_state(TASK_INTERRUPTIBLE);
1143 if (!timeo) {
1144 err = -EAGAIN;
1145 break;
1148 release_sock(sk);
1149 timeo = schedule_timeout(timeo);
1150 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1152 if (sk->sk_state != BT_LISTEN) {
1153 err = -EBADFD;
1154 break;
1157 if (signal_pending(current)) {
1158 err = sock_intr_errno(timeo);
1159 break;
1162 set_current_state(TASK_RUNNING);
1163 remove_wait_queue(sk->sk_sleep, &wait);
1165 if (err)
1166 goto done;
1168 newsock->state = SS_CONNECTED;
1170 BT_DBG("new socket %p", nsk);
1172 done:
1173 release_sock(sk);
1174 return err;
1177 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1179 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1180 struct sock *sk = sock->sk;
1182 BT_DBG("sock %p, sk %p", sock, sk);
1184 addr->sa_family = AF_BLUETOOTH;
1185 *len = sizeof(struct sockaddr_l2);
1187 if (peer) {
1188 la->l2_psm = l2cap_pi(sk)->psm;
1189 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1190 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1191 } else {
1192 la->l2_psm = l2cap_pi(sk)->sport;
1193 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1194 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1197 return 0;
1200 static void l2cap_monitor_timeout(unsigned long arg)
1202 struct sock *sk = (void *) arg;
1203 u16 control;
1205 bh_lock_sock(sk);
1206 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1207 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1208 return;
1211 l2cap_pi(sk)->retry_count++;
1212 __mod_monitor_timer();
1214 control = L2CAP_CTRL_POLL;
1215 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1216 bh_unlock_sock(sk);
1219 static void l2cap_retrans_timeout(unsigned long arg)
1221 struct sock *sk = (void *) arg;
1222 u16 control;
1224 bh_lock_sock(sk);
1225 l2cap_pi(sk)->retry_count = 1;
1226 __mod_monitor_timer();
1228 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1230 control = L2CAP_CTRL_POLL;
1231 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1232 bh_unlock_sock(sk);
1235 static void l2cap_drop_acked_frames(struct sock *sk)
1237 struct sk_buff *skb;
1239 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1240 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1241 break;
1243 skb = skb_dequeue(TX_QUEUE(sk));
1244 kfree_skb(skb);
1246 l2cap_pi(sk)->unacked_frames--;
1249 if (!l2cap_pi(sk)->unacked_frames)
1250 del_timer(&l2cap_pi(sk)->retrans_timer);
1252 return;
1255 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1257 struct l2cap_pinfo *pi = l2cap_pi(sk);
1258 int err;
1260 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1262 err = hci_send_acl(pi->conn->hcon, skb, 0);
1263 if (err < 0)
1264 kfree_skb(skb);
1266 return err;
1269 static int l2cap_streaming_send(struct sock *sk)
1271 struct sk_buff *skb, *tx_skb;
1272 struct l2cap_pinfo *pi = l2cap_pi(sk);
1273 u16 control, fcs;
1274 int err;
1276 while ((skb = sk->sk_send_head)) {
1277 tx_skb = skb_clone(skb, GFP_ATOMIC);
1279 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1280 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1281 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1283 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1284 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1285 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1288 err = l2cap_do_send(sk, tx_skb);
1289 if (err < 0) {
1290 l2cap_send_disconn_req(pi->conn, sk);
1291 return err;
1294 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1296 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1297 sk->sk_send_head = NULL;
1298 else
1299 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1301 skb = skb_dequeue(TX_QUEUE(sk));
1302 kfree_skb(skb);
1304 return 0;
1307 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1309 struct l2cap_pinfo *pi = l2cap_pi(sk);
1310 struct sk_buff *skb, *tx_skb;
1311 u16 control, fcs;
1312 int err;
1314 skb = skb_peek(TX_QUEUE(sk));
1315 do {
1316 if (bt_cb(skb)->tx_seq != tx_seq) {
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 break;
1319 skb = skb_queue_next(TX_QUEUE(sk), skb);
1320 continue;
1323 if (pi->remote_max_tx &&
1324 bt_cb(skb)->retries == pi->remote_max_tx) {
1325 l2cap_send_disconn_req(pi->conn, sk);
1326 break;
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1331 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1332 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1333 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1334 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1336 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1337 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1338 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1341 err = l2cap_do_send(sk, tx_skb);
1342 if (err < 0) {
1343 l2cap_send_disconn_req(pi->conn, sk);
1344 return err;
1346 break;
1347 } while(1);
1348 return 0;
1351 static int l2cap_ertm_send(struct sock *sk)
1353 struct sk_buff *skb, *tx_skb;
1354 struct l2cap_pinfo *pi = l2cap_pi(sk);
1355 u16 control, fcs;
1356 int err;
1358 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1359 return 0;
1361 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
1362 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1363 tx_skb = skb_clone(skb, GFP_ATOMIC);
1365 if (pi->remote_max_tx &&
1366 bt_cb(skb)->retries == pi->remote_max_tx) {
1367 l2cap_send_disconn_req(pi->conn, sk);
1368 break;
1371 bt_cb(skb)->retries++;
1373 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1374 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1375 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1376 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1379 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1380 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1381 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1384 err = l2cap_do_send(sk, tx_skb);
1385 if (err < 0) {
1386 l2cap_send_disconn_req(pi->conn, sk);
1387 return err;
1389 __mod_retrans_timer();
1391 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1392 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1394 pi->unacked_frames++;
1396 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1397 sk->sk_send_head = NULL;
1398 else
1399 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1402 return 0;
1405 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1407 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1408 struct sk_buff **frag;
1409 int err, sent = 0;
1411 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1412 return -EFAULT;
1415 sent += count;
1416 len -= count;
1418 /* Continuation fragments (no L2CAP header) */
1419 frag = &skb_shinfo(skb)->frag_list;
1420 while (len) {
1421 count = min_t(unsigned int, conn->mtu, len);
1423 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1424 if (!*frag)
1425 return -EFAULT;
1426 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1427 return -EFAULT;
1429 sent += count;
1430 len -= count;
1432 frag = &(*frag)->next;
1435 return sent;
/* Build a connectionless (SOCK_DGRAM) PDU: an L2CAP header, the 2-byte
 * PSM, then the user payload copied in by l2cap_skbuff_fromiovec()
 * (overflow goes into frag_list fragments).  Returns the skb, or an
 * ERR_PTR() on allocation/copy failure — callers must check IS_ERR(). */
1438 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1440 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1441 struct sk_buff *skb;
1442 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1443 struct l2cap_hdr *lh;
1445 BT_DBG("sk %p len %d", sk, (int)len);
/* First skb holds at most one link MTU worth of header + data */
1447 count = min_t(unsigned int, (conn->mtu - hlen), len);
1448 skb = bt_skb_send_alloc(sk, count + hlen,
1449 msg->msg_flags & MSG_DONTWAIT, &err);
1450 if (!skb)
1451 return ERR_PTR(-ENOMEM);
1453 /* Create L2CAP header */
1454 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1455 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1456 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1457 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1459 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1460 if (unlikely(err < 0)) {
1461 kfree_skb(skb);
1462 return ERR_PTR(err);
1464 return skb;
/* Build a basic-mode PDU: a bare L2CAP header followed by the user
 * payload (overflow goes into frag_list fragments).  Returns the skb
 * or an ERR_PTR() on failure. */
1467 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1469 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1470 struct sk_buff *skb;
1471 int err, count, hlen = L2CAP_HDR_SIZE;
1472 struct l2cap_hdr *lh;
1474 BT_DBG("sk %p len %d", sk, (int)len);
1476 count = min_t(unsigned int, (conn->mtu - hlen), len);
1477 skb = bt_skb_send_alloc(sk, count + hlen,
1478 msg->msg_flags & MSG_DONTWAIT, &err);
1479 if (!skb)
1480 return ERR_PTR(-ENOMEM);
1482 /* Create L2CAP header */
1483 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1484 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1485 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1487 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1488 if (unlikely(err < 0)) {
1489 kfree_skb(skb);
1490 return ERR_PTR(err);
1492 return skb;
/* Build an ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * an optional 2-byte SAR SDU length (@sdulen != 0 only on the first
 * segment of a segmented SDU), the payload, and — when CRC16 is
 * negotiated — a 2-byte zeroed FCS placeholder that the send paths
 * overwrite with the real checksum at transmit time.  Returns the skb
 * with retries cleared, or an ERR_PTR() on failure. */
1495 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1497 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1498 struct sk_buff *skb;
1499 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1500 struct l2cap_hdr *lh;
1502 BT_DBG("sk %p len %d", sk, (int)len);
/* Account for the optional SAR length and FCS in the header budget */
1504 if (sdulen)
1505 hlen += 2;
1507 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1508 hlen += 2;
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1513 if (!skb)
1514 return ERR_PTR(-ENOMEM);
1516 /* Create L2CAP header */
1517 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1519 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1520 put_unaligned_le16(control, skb_put(skb, 2));
1521 if (sdulen)
1522 put_unaligned_le16(sdulen, skb_put(skb, 2));
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1526 kfree_skb(skb);
1527 return ERR_PTR(err);
/* FCS placeholder; the real value is computed when the frame is sent */
1530 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1531 put_unaligned_le16(0, skb_put(skb, 2));
1533 bt_cb(skb)->retries = 0;
1534 return skb;
/* Segment an SDU larger than max_pdu_size into a START frame (which
 * carries the total SDU length), zero or more CONTINUE frames and a
 * final END frame.  Frames are built on a private queue first so a
 * mid-segmentation failure leaves TX_QUEUE(sk) untouched, then spliced
 * onto the transmit queue.  Returns the number of payload bytes queued
 * or a negative error. */
1537 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1539 struct l2cap_pinfo *pi = l2cap_pi(sk);
1540 struct sk_buff *skb;
1541 struct sk_buff_head sar_queue;
1542 u16 control;
1543 size_t size = 0;
1545 __skb_queue_head_init(&sar_queue);
1546 control = L2CAP_SDU_START;
/* First segment: sdulen = total SDU length, per the SAR scheme */
1547 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1548 if (IS_ERR(skb))
1549 return PTR_ERR(skb);
1551 __skb_queue_tail(&sar_queue, skb);
1552 len -= pi->max_pdu_size;
1553 size +=pi->max_pdu_size;
1554 control = 0;
1556 while (len > 0) {
1557 size_t buflen;
1559 if (len > pi->max_pdu_size) {
1560 control |= L2CAP_SDU_CONTINUE;
1561 buflen = pi->max_pdu_size;
1562 } else {
1563 control |= L2CAP_SDU_END;
1564 buflen = len;
1567 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1568 if (IS_ERR(skb)) {
/* Drop everything built so far; nothing reached TX_QUEUE yet */
1569 skb_queue_purge(&sar_queue);
1570 return PTR_ERR(skb);
1573 __skb_queue_tail(&sar_queue, skb);
1574 len -= buflen;
1575 size += buflen;
1576 control = 0;
/* sar_queue.next still points at the first spliced skb, so it is a
 * valid new send head after the splice */
1578 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1579 if (sk->sk_send_head == NULL)
1580 sk->sk_send_head = sar_queue.next;
1582 return size;
1585 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1587 struct sock *sk = sock->sk;
1588 struct l2cap_pinfo *pi = l2cap_pi(sk);
1589 struct sk_buff *skb;
1590 u16 control;
1591 int err;
1593 BT_DBG("sock %p, sk %p", sock, sk);
1595 err = sock_error(sk);
1596 if (err)
1597 return err;
1599 if (msg->msg_flags & MSG_OOB)
1600 return -EOPNOTSUPP;
1602 /* Check outgoing MTU */
1603 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1604 && len > pi->omtu)
1605 return -EINVAL;
1607 lock_sock(sk);
1609 if (sk->sk_state != BT_CONNECTED) {
1610 err = -ENOTCONN;
1611 goto done;
1614 /* Connectionless channel */
1615 if (sk->sk_type == SOCK_DGRAM) {
1616 skb = l2cap_create_connless_pdu(sk, msg, len);
1617 err = l2cap_do_send(sk, skb);
1618 goto done;
1621 switch (pi->mode) {
1622 case L2CAP_MODE_BASIC:
1623 /* Create a basic PDU */
1624 skb = l2cap_create_basic_pdu(sk, msg, len);
1625 if (IS_ERR(skb)) {
1626 err = PTR_ERR(skb);
1627 goto done;
1630 err = l2cap_do_send(sk, skb);
1631 if (!err)
1632 err = len;
1633 break;
1635 case L2CAP_MODE_ERTM:
1636 case L2CAP_MODE_STREAMING:
1637 /* Entire SDU fits into one PDU */
1638 if (len <= pi->max_pdu_size) {
1639 control = L2CAP_SDU_UNSEGMENTED;
1640 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1641 if (IS_ERR(skb)) {
1642 err = PTR_ERR(skb);
1643 goto done;
1645 __skb_queue_tail(TX_QUEUE(sk), skb);
1646 if (sk->sk_send_head == NULL)
1647 sk->sk_send_head = skb;
1648 } else {
1649 /* Segment SDU into multiples PDUs */
1650 err = l2cap_sar_segment_sdu(sk, msg, len);
1651 if (err < 0)
1652 goto done;
1655 if (pi->mode == L2CAP_MODE_STREAMING)
1656 err = l2cap_streaming_send(sk);
1657 else
1658 err = l2cap_ertm_send(sk);
1660 if (!err)
1661 err = len;
1662 break;
1664 default:
1665 BT_DBG("bad state %1.1x", pi->mode);
1666 err = -EINVAL;
1669 done:
1670 release_sock(sk);
1671 return err;
/* recvmsg() entry point.  With deferred setup, the first read on a
 * BT_CONNECT2 socket completes the pending connection: it moves the
 * socket to BT_CONFIG and sends the success connect response that was
 * held back, then returns 0.  All other reads are delegated to the
 * generic bt_sock_recvmsg(). */
1674 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1676 struct sock *sk = sock->sk;
1678 lock_sock(sk);
1680 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1681 struct l2cap_conn_rsp rsp;
1683 sk->sk_state = BT_CONFIG;
1685 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1687 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1688 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1689 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1690 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1692 release_sock(sk);
1693 return 0;
1696 release_sock(sk);
1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (channel
 * parameters) and L2CAP_LM (link-mode / security flags). */
1701 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1703 struct sock *sk = sock->sk;
1704 struct l2cap_options opts;
1705 int len, err = 0;
1706 u32 opt;
1708 BT_DBG("sk %p", sk);
1710 lock_sock(sk);
1712 switch (optname) {
1713 case L2CAP_OPTIONS:
/* Pre-fill with current values so a short copy_from_user leaves the
 * untouched fields unchanged */
1714 opts.imtu = l2cap_pi(sk)->imtu;
1715 opts.omtu = l2cap_pi(sk)->omtu;
1716 opts.flush_to = l2cap_pi(sk)->flush_to;
1717 opts.mode = l2cap_pi(sk)->mode;
1718 opts.fcs = l2cap_pi(sk)->fcs;
1720 len = min_t(unsigned int, sizeof(opts), optlen);
1721 if (copy_from_user((char *) &opts, optval, len)) {
1722 err = -EFAULT;
1723 break;
/* NOTE(review): opts.flush_to from userspace is never stored back —
 * only imtu/omtu/mode/fcs are applied; confirm whether intentional */
1726 l2cap_pi(sk)->imtu = opts.imtu;
1727 l2cap_pi(sk)->omtu = opts.omtu;
1728 l2cap_pi(sk)->mode = opts.mode;
1729 l2cap_pi(sk)->fcs = opts.fcs;
1730 break;
1732 case L2CAP_LM:
1733 if (get_user(opt, (u32 __user *) optval)) {
1734 err = -EFAULT;
1735 break;
/* The strongest requested level wins (checked low to high) */
1738 if (opt & L2CAP_LM_AUTH)
1739 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1740 if (opt & L2CAP_LM_ENCRYPT)
1741 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1742 if (opt & L2CAP_LM_SECURE)
1743 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1745 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1746 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1747 break;
1749 default:
1750 err = -ENOPROTOOPT;
1751 break;
1754 release_sock(sk);
1755 return err;
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY (SEQPACKET/RAW only) and
 * BT_DEFER_SETUP (only before the socket is connected). */
1758 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1760 struct sock *sk = sock->sk;
1761 struct bt_security sec;
1762 int len, err = 0;
1763 u32 opt;
1765 BT_DBG("sk %p", sk);
1767 if (level == SOL_L2CAP)
1768 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1770 if (level != SOL_BLUETOOTH)
1771 return -ENOPROTOOPT;
1773 lock_sock(sk);
1775 switch (optname) {
1776 case BT_SECURITY:
1777 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1778 err = -EINVAL;
1779 break;
/* Default applies when the user buffer is shorter than the struct */
1782 sec.level = BT_SECURITY_LOW;
1784 len = min_t(unsigned int, sizeof(sec), optlen);
1785 if (copy_from_user((char *) &sec, optval, len)) {
1786 err = -EFAULT;
1787 break;
1790 if (sec.level < BT_SECURITY_LOW ||
1791 sec.level > BT_SECURITY_HIGH) {
1792 err = -EINVAL;
1793 break;
1796 l2cap_pi(sk)->sec_level = sec.level;
1797 break;
1799 case BT_DEFER_SETUP:
1800 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1801 err = -EINVAL;
1802 break;
1805 if (get_user(opt, (u32 __user *) optval)) {
1806 err = -EFAULT;
1807 break;
1810 bt_sk(sk)->defer_setup = opt;
1811 break;
1813 default:
1814 err = -ENOPROTOOPT;
1815 break;
1818 release_sock(sk);
1819 return err;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM (maps
 * the stored sec_level back to cumulative LM flags) and L2CAP_CONNINFO
 * (HCI handle + device class, connected sockets only). */
1822 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1824 struct sock *sk = sock->sk;
1825 struct l2cap_options opts;
1826 struct l2cap_conninfo cinfo;
1827 int len, err = 0;
1828 u32 opt;
1830 BT_DBG("sk %p", sk);
1832 if (get_user(len, optlen))
1833 return -EFAULT;
1835 lock_sock(sk);
1837 switch (optname) {
1838 case L2CAP_OPTIONS:
1839 opts.imtu = l2cap_pi(sk)->imtu;
1840 opts.omtu = l2cap_pi(sk)->omtu;
1841 opts.flush_to = l2cap_pi(sk)->flush_to;
1842 opts.mode = l2cap_pi(sk)->mode;
1843 opts.fcs = l2cap_pi(sk)->fcs;
1845 len = min_t(unsigned int, len, sizeof(opts));
1846 if (copy_to_user(optval, (char *) &opts, len))
1847 err = -EFAULT;
1849 break;
1851 case L2CAP_LM:
/* Higher security levels imply all the weaker LM flags */
1852 switch (l2cap_pi(sk)->sec_level) {
1853 case BT_SECURITY_LOW:
1854 opt = L2CAP_LM_AUTH;
1855 break;
1856 case BT_SECURITY_MEDIUM:
1857 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1858 break;
1859 case BT_SECURITY_HIGH:
1860 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1861 L2CAP_LM_SECURE;
1862 break;
1863 default:
1864 opt = 0;
1865 break;
1868 if (l2cap_pi(sk)->role_switch)
1869 opt |= L2CAP_LM_MASTER;
1871 if (l2cap_pi(sk)->force_reliable)
1872 opt |= L2CAP_LM_RELIABLE;
1874 if (put_user(opt, (u32 __user *) optval))
1875 err = -EFAULT;
1876 break;
1878 case L2CAP_CONNINFO:
/* Also allowed for a deferred-setup socket still in BT_CONNECT2 */
1879 if (sk->sk_state != BT_CONNECTED &&
1880 !(sk->sk_state == BT_CONNECT2 &&
1881 bt_sk(sk)->defer_setup)) {
1882 err = -ENOTCONN;
1883 break;
1886 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1887 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1889 len = min_t(unsigned int, len, sizeof(cinfo));
1890 if (copy_to_user(optval, (char *) &cinfo, len))
1891 err = -EFAULT;
1893 break;
1895 default:
1896 err = -ENOPROTOOPT;
1897 break;
1900 release_sock(sk);
1901 return err;
/* getsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH reads back BT_SECURITY and BT_DEFER_SETUP with the same
 * socket-type/state restrictions as the setter. */
1904 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1906 struct sock *sk = sock->sk;
1907 struct bt_security sec;
1908 int len, err = 0;
1910 BT_DBG("sk %p", sk);
1912 if (level == SOL_L2CAP)
1913 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1915 if (level != SOL_BLUETOOTH)
1916 return -ENOPROTOOPT;
1918 if (get_user(len, optlen))
1919 return -EFAULT;
1921 lock_sock(sk);
1923 switch (optname) {
1924 case BT_SECURITY:
1925 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1926 err = -EINVAL;
1927 break;
1930 sec.level = l2cap_pi(sk)->sec_level;
1932 len = min_t(unsigned int, len, sizeof(sec));
1933 if (copy_to_user(optval, (char *) &sec, len))
1934 err = -EFAULT;
1936 break;
1938 case BT_DEFER_SETUP:
1939 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1940 err = -EINVAL;
1941 break;
1944 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1945 err = -EFAULT;
1947 break;
1949 default:
1950 err = -ENOPROTOOPT;
1951 break;
1954 release_sock(sk);
1955 return err;
/* shutdown() entry point.  Idempotent: the sk_shutdown check makes a
 * second call a no-op.  Closes the channel and, when SO_LINGER is set
 * with a nonzero timeout, waits for the socket to reach BT_CLOSED. */
1958 static int l2cap_sock_shutdown(struct socket *sock, int how)
1960 struct sock *sk = sock->sk;
1961 int err = 0;
1963 BT_DBG("sock %p, sk %p", sock, sk);
1965 if (!sk)
1966 return 0;
1968 lock_sock(sk);
1969 if (!sk->sk_shutdown) {
1970 sk->sk_shutdown = SHUTDOWN_MASK;
1971 l2cap_sock_clear_timer(sk);
1972 __l2cap_sock_close(sk, 0);
1974 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1975 err = bt_sock_wait_state(sk, BT_CLOSED,
1976 sk->sk_lingertime);
1978 release_sock(sk);
1979 return err;
/* release() entry point: full shutdown, then orphan the socket and let
 * l2cap_sock_kill() free it when it is no longer referenced. */
1982 static int l2cap_sock_release(struct socket *sock)
1984 struct sock *sk = sock->sk;
1985 int err;
1987 BT_DBG("sock %p, sk %p", sock, sk);
1989 if (!sk)
1990 return 0;
1992 err = l2cap_sock_shutdown(sock, 2);
1994 sock_orphan(sk);
1995 l2cap_sock_kill(sk);
1996 return err;
/* Mark a channel as fully configured: reset configuration state, stop
 * the channel timer, and wake whoever is waiting — the connect()er for
 * an outgoing channel, or the listening parent for an incoming one. */
1999 static void l2cap_chan_ready(struct sock *sk)
2001 struct sock *parent = bt_sk(sk)->parent;
2003 BT_DBG("sk %p, parent %p", sk, parent);
2005 l2cap_pi(sk)->conf_state = 0;
2006 l2cap_sock_clear_timer(sk);
2008 if (!parent) {
2009 /* Outgoing channel.
2010 * Wake up socket sleeping on connect.
2012 sk->sk_state = BT_CONNECTED;
2013 sk->sk_state_change(sk);
2014 } else {
2015 /* Incoming channel.
2016 * Wake up socket sleeping on accept.
2018 parent->sk_data_ready(parent, 0);
2022 /* Copy frame to all raw sockets on that connection */
2023 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2025 struct l2cap_chan_list *l = &conn->chan_list;
2026 struct sk_buff *nskb;
2027 struct sock *sk;
2029 BT_DBG("conn %p", conn);
2031 read_lock(&l->lock);
2032 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2033 if (sk->sk_type != SOCK_RAW)
2034 continue;
2036 /* Don't send frame to the socket it came from */
2037 if (skb->sk == sk)
2038 continue;
/* Best effort: a failed clone or a full receive queue just skips
 * this socket */
2039 nskb = skb_clone(skb, GFP_ATOMIC);
2040 if (!nskb)
2041 continue;
2043 if (sock_queue_rcv_skb(sk, nskb))
2044 kfree_skb(nskb);
2046 read_unlock(&l->lock);
2049 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID =
 * L2CAP_CID_SIGNALING) + command header + @dlen bytes of @data.  Data
 * exceeding the connection MTU is spread over continuation fragments
 * chained on frag_list.  Returns NULL on allocation failure. */
2050 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2051 u8 code, u8 ident, u16 dlen, void *data)
2053 struct sk_buff *skb, **frag;
2054 struct l2cap_cmd_hdr *cmd;
2055 struct l2cap_hdr *lh;
2056 int len, count;
2058 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2059 conn, code, ident, dlen);
2061 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2062 count = min_t(unsigned int, conn->mtu, len);
2064 skb = bt_skb_alloc(count, GFP_ATOMIC);
2065 if (!skb)
2066 return NULL;
2068 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2069 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2070 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2072 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2073 cmd->code = code;
2074 cmd->ident = ident;
2075 cmd->len = cpu_to_le16(dlen);
2077 if (dlen) {
/* Fill the remainder of the first skb with payload */
2078 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2079 memcpy(skb_put(skb, count), data, count);
2080 data += count;
2083 len -= skb->len;
2085 /* Continuation fragments (no L2CAP header) */
2086 frag = &skb_shinfo(skb)->frag_list;
2087 while (len) {
2088 count = min_t(unsigned int, conn->mtu, len);
2090 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2091 if (!*frag)
2092 goto fail;
2094 memcpy(skb_put(*frag, count), data, count);
2096 len -= count;
2097 data += count;
2099 frag = &(*frag)->next;
2102 return skb;
2104 fail:
/* kfree_skb also frees any fragments already chained on frag_list */
2105 kfree_skb(skb);
2106 return NULL;
/* Decode one configuration TLV option at *ptr, advancing *ptr past it.
 * Options of length 1/2/4 are returned by value in *val (16/32-bit ones
 * converted from little endian); any other length returns a pointer to
 * the option payload instead.  Returns the total encoded size consumed.
 *
 * NOTE(review): opt->len is not validated against the remaining buffer
 * — the callers only check `len >= L2CAP_CONF_OPT_SIZE` per iteration,
 * so a malformed option length could walk past the config buffer;
 * confirm against the callers' bounds handling. */
2109 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2111 struct l2cap_conf_opt *opt = *ptr;
2112 int len;
2114 len = L2CAP_CONF_OPT_SIZE + opt->len;
2115 *ptr += len;
2117 *type = opt->type;
2118 *olen = opt->len;
2120 switch (opt->len) {
2121 case 1:
2122 *val = *((u8 *) opt->val);
2123 break;
2125 case 2:
2126 *val = __le16_to_cpu(*((__le16 *) opt->val));
2127 break;
2129 case 4:
2130 *val = __le32_to_cpu(*((__le32 *) opt->val));
2131 break;
2133 default:
2134 *val = (unsigned long) opt->val;
2135 break;
2138 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2139 return len;
/* Encode one configuration TLV option at *ptr, advancing *ptr past it.
 * Values of length 1/2/4 are stored inline (16/32-bit ones converted to
 * little endian); any other length treats @val as a pointer to @len
 * bytes to copy.  The caller must ensure the buffer has room. */
2142 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2144 struct l2cap_conf_opt *opt = *ptr;
2146 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2148 opt->type = type;
2149 opt->len = len;
2151 switch (len) {
2152 case 1:
2153 *((u8 *) opt->val) = val;
2154 break;
2156 case 2:
2157 *((__le16 *) opt->val) = cpu_to_le16(val);
2158 break;
2160 case 4:
2161 *((__le32 *) opt->val) = cpu_to_le32(val);
2162 break;
2164 default:
2165 memcpy(opt->val, (void *) val, len);
2166 break;
2169 *ptr += L2CAP_CONF_OPT_SIZE + len;
2172 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2174 u32 local_feat_mask = l2cap_feat_mask;
2175 if (enable_ertm)
2176 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2178 switch (mode) {
2179 case L2CAP_MODE_ERTM:
2180 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2181 case L2CAP_MODE_STREAMING:
2182 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2183 default:
2184 return 0x00;
2188 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2190 switch (mode) {
2191 case L2CAP_MODE_STREAMING:
2192 case L2CAP_MODE_ERTM:
2193 if (l2cap_mode_supported(mode, remote_feat_mask))
2194 return mode;
2195 /* fall through */
2196 default:
2197 return L2CAP_MODE_BASIC;
/* Build a configuration request for the channel into @data.  On the
 * first request the mode is validated/selected; every request then
 * advertises the options for the resulting mode (MTU for basic mode,
 * an RFC option plus optional FCS for ERTM/streaming).  Returns the
 * number of bytes written. */
2203 static int l2cap_build_conf_req(struct sock *sk, void *data)
2205 struct l2cap_pinfo *pi = l2cap_pi(sk);
2206 struct l2cap_conf_req *req = data;
2207 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2208 void *ptr = req->data;
2210 BT_DBG("sk %p", sk);
/* Mode selection happens only once, before any request/response */
2212 if (pi->num_conf_req || pi->num_conf_rsp)
2213 goto done;
2215 switch (pi->mode) {
2216 case L2CAP_MODE_STREAMING:
2217 case L2CAP_MODE_ERTM:
2218 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2219 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2220 l2cap_send_disconn_req(pi->conn, sk);
2221 break;
2222 default:
2223 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2224 break;
2227 done:
2228 switch (pi->mode) {
2229 case L2CAP_MODE_BASIC:
/* Only a non-default MTU needs to be sent explicitly */
2230 if (pi->imtu != L2CAP_DEFAULT_MTU)
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2232 break;
2234 case L2CAP_MODE_ERTM:
2235 rfc.mode = L2CAP_MODE_ERTM;
2236 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2237 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Timeouts are filled in by the responder per the L2CAP spec */
2238 rfc.retrans_timeout = 0;
2239 rfc.monitor_timeout = 0;
2240 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2242 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2243 sizeof(rfc), (unsigned long) &rfc);
2245 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2246 break;
2248 if (pi->fcs == L2CAP_FCS_NONE ||
2249 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2250 pi->fcs = L2CAP_FCS_NONE;
2251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2253 break;
2255 case L2CAP_MODE_STREAMING:
2256 rfc.mode = L2CAP_MODE_STREAMING;
2257 rfc.txwin_size = 0;
2258 rfc.max_transmit = 0;
2259 rfc.retrans_timeout = 0;
2260 rfc.monitor_timeout = 0;
2261 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2263 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2264 sizeof(rfc), (unsigned long) &rfc);
2266 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2267 break;
2269 if (pi->fcs == L2CAP_FCS_NONE ||
2270 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2271 pi->fcs = L2CAP_FCS_NONE;
2272 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2274 break;
2277 /* FIXME: Need actual value of the flush timeout */
2278 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2279 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2281 req->dcid = cpu_to_le16(pi->dcid);
2282 req->flags = cpu_to_le16(0);
2284 return ptr - data;
/* Parse the peer's accumulated configuration request (pi->conf_req /
 * pi->conf_len) and build our response into @data.  Unknown non-hint
 * options are echoed back with L2CAP_CONF_UNKNOWN; a mode mismatch is
 * answered with L2CAP_CONF_UNACCEPT and our preferred RFC (refusing the
 * connection on the second mismatch).  On success the accepted output
 * options (MTU, RFC) are written back and the relevant CONF_*_DONE
 * bits set.  Returns the response length or -ECONNREFUSED. */
2287 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2289 struct l2cap_pinfo *pi = l2cap_pi(sk);
2290 struct l2cap_conf_rsp *rsp = data;
2291 void *ptr = rsp->data;
2292 void *req = pi->conf_req;
2293 int len = pi->conf_len;
2294 int type, hint, olen;
2295 unsigned long val;
2296 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2297 u16 mtu = L2CAP_DEFAULT_MTU;
2298 u16 result = L2CAP_CONF_SUCCESS;
2300 BT_DBG("sk %p", sk);
2302 while (len >= L2CAP_CONF_OPT_SIZE) {
2303 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored silently; others must be understood */
2305 hint = type & L2CAP_CONF_HINT;
2306 type &= L2CAP_CONF_MASK;
2308 switch (type) {
2309 case L2CAP_CONF_MTU:
2310 mtu = val;
2311 break;
2313 case L2CAP_CONF_FLUSH_TO:
2314 pi->flush_to = val;
2315 break;
2317 case L2CAP_CONF_QOS:
2318 break;
2320 case L2CAP_CONF_RFC:
2321 if (olen == sizeof(rfc))
2322 memcpy(&rfc, (void *) val, olen);
2323 break;
2325 case L2CAP_CONF_FCS:
2326 if (val == L2CAP_FCS_NONE)
2327 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2329 break;
2331 default:
2332 if (hint)
2333 break;
/* Collect unknown option types to report back */
2335 result = L2CAP_CONF_UNKNOWN;
2336 *((u8 *) ptr++) = type;
2337 break;
/* Mode selection happens only on the very first exchange */
2341 if (pi->num_conf_rsp || pi->num_conf_req)
2342 goto done;
2344 switch (pi->mode) {
2345 case L2CAP_MODE_STREAMING:
2346 case L2CAP_MODE_ERTM:
2347 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2348 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2349 return -ECONNREFUSED;
2350 break;
2351 default:
2352 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2353 break;
2356 done:
2357 if (pi->mode != rfc.mode) {
2358 result = L2CAP_CONF_UNACCEPT;
2359 rfc.mode = pi->mode;
/* Second disagreement about the mode: give up */
2361 if (pi->num_conf_rsp == 1)
2362 return -ECONNREFUSED;
2364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2365 sizeof(rfc), (unsigned long) &rfc);
2369 if (result == L2CAP_CONF_SUCCESS) {
2370 /* Configure output options and let the other side know
2371 * which ones we don't like. */
2373 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2374 result = L2CAP_CONF_UNACCEPT;
2375 else {
2376 pi->omtu = mtu;
2377 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2379 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2381 switch (rfc.mode) {
2382 case L2CAP_MODE_BASIC:
2383 pi->fcs = L2CAP_FCS_NONE;
2384 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2385 break;
2387 case L2CAP_MODE_ERTM:
2388 pi->remote_tx_win = rfc.txwin_size;
2389 pi->remote_max_tx = rfc.max_transmit;
2390 pi->max_pdu_size = rfc.max_pdu_size;
/* We (the responder) set the timeout values, per the spec */
2392 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2393 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2395 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2396 break;
2398 case L2CAP_MODE_STREAMING:
2399 pi->remote_tx_win = rfc.txwin_size;
2400 pi->max_pdu_size = rfc.max_pdu_size;
2402 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2403 break;
2405 default:
2406 result = L2CAP_CONF_UNACCEPT;
2408 memset(&rfc, 0, sizeof(rfc));
2409 rfc.mode = pi->mode;
2412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2413 sizeof(rfc), (unsigned long) &rfc);
2415 if (result == L2CAP_CONF_SUCCESS)
2416 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2418 rsp->scid = cpu_to_le16(pi->dcid);
2419 rsp->result = cpu_to_le16(result);
2420 rsp->flags = cpu_to_le16(0x0000);
2422 return ptr - data;
2423 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2426 struct l2cap_conf_req *req = data;
2427 void *ptr = req->data;
2428 int type, olen;
2429 unsigned long val;
2430 struct l2cap_conf_rfc rfc;
2432 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2434 while (len >= L2CAP_CONF_OPT_SIZE) {
2435 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2437 switch (type) {
2438 case L2CAP_CONF_MTU:
2439 if (val < L2CAP_DEFAULT_MIN_MTU) {
2440 *result = L2CAP_CONF_UNACCEPT;
2441 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2442 } else
2443 pi->omtu = val;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2445 break;
2447 case L2CAP_CONF_FLUSH_TO:
2448 pi->flush_to = val;
2449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2450 2, pi->flush_to);
2451 break;
2453 case L2CAP_CONF_RFC:
2454 if (olen == sizeof(rfc))
2455 memcpy(&rfc, (void *)val, olen);
2457 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2458 rfc.mode != pi->mode)
2459 return -ECONNREFUSED;
2461 pi->mode = rfc.mode;
2462 pi->fcs = 0;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2465 sizeof(rfc), (unsigned long) &rfc);
2466 break;
2470 if (*result == L2CAP_CONF_SUCCESS) {
2471 switch (rfc.mode) {
2472 case L2CAP_MODE_ERTM:
2473 pi->remote_tx_win = rfc.txwin_size;
2474 pi->retrans_timeout = rfc.retrans_timeout;
2475 pi->monitor_timeout = rfc.monitor_timeout;
2476 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2477 break;
2478 case L2CAP_MODE_STREAMING:
2479 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2480 break;
2484 req->dcid = cpu_to_le16(pi->dcid);
2485 req->flags = cpu_to_le16(0x0000);
2487 return ptr - data;
/* Build a minimal (option-less) configuration response with the given
 * @result and @flags.  Returns the number of bytes written. */
2490 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2492 struct l2cap_conf_rsp *rsp = data;
2493 void *ptr = rsp->data;
2495 BT_DBG("sk %p", sk);
2497 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2498 rsp->result = cpu_to_le16(result);
2499 rsp->flags = cpu_to_le16(flags);
2501 return ptr - data;
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * information request (reason 0x0000 "command not understood", matching
 * ident), treat the feature-mask exchange as done and proceed with
 * starting queued connections. */
2504 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2506 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2508 if (rej->reason != 0x0000)
2509 return 0;
2511 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2512 cmd->ident == conn->info_ident) {
2513 del_timer(&conn->info_timer);
2515 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2516 conn->info_ident = 0;
2518 l2cap_conn_start(conn);
2521 return 0;
/* Handle an incoming Connection Request: locate a listener for the PSM,
 * enforce link security (except for SDP, PSM 0x0001), allocate and
 * register a child socket, and answer with success, pending (security /
 * deferred setup / feature exchange in flight) or a failure code. */
2524 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2526 struct l2cap_chan_list *list = &conn->chan_list;
2527 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2528 struct l2cap_conn_rsp rsp;
2529 struct sock *sk, *parent;
2530 int result, status = L2CAP_CS_NO_INFO;
2532 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2533 __le16 psm = req->psm;
2535 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2537 /* Check if we have socket listening on psm */
/* NOTE(review): the bh_unlock_sock(parent) at response: implies
 * l2cap_get_sock_by_psm() returns with the parent locked — confirm */
2538 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2539 if (!parent) {
2540 result = L2CAP_CR_BAD_PSM;
2541 goto sendresp;
2544 /* Check if the ACL is secure enough (if not SDP) */
2545 if (psm != cpu_to_le16(0x0001) &&
2546 !hci_conn_check_link_mode(conn->hcon)) {
2547 conn->disc_reason = 0x05;
2548 result = L2CAP_CR_SEC_BLOCK;
2549 goto response;
2552 result = L2CAP_CR_NO_MEM;
2554 /* Check for backlog size */
2555 if (sk_acceptq_is_full(parent)) {
2556 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2557 goto response;
2560 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2561 if (!sk)
2562 goto response;
2564 write_lock_bh(&list->lock);
2566 /* Check if we already have channel with that dcid */
2567 if (__l2cap_get_chan_by_dcid(list, scid)) {
2568 write_unlock_bh(&list->lock);
2569 sock_set_flag(sk, SOCK_ZAPPED);
2570 l2cap_sock_kill(sk);
2571 goto response;
2574 hci_conn_hold(conn->hcon);
2576 l2cap_sock_init(sk, parent);
2577 bacpy(&bt_sk(sk)->src, conn->src);
2578 bacpy(&bt_sk(sk)->dst, conn->dst);
2579 l2cap_pi(sk)->psm = psm;
2580 l2cap_pi(sk)->dcid = scid;
2582 __l2cap_chan_add(conn, sk, parent);
2583 dcid = l2cap_pi(sk)->scid;
2585 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2587 l2cap_pi(sk)->ident = cmd->ident;
/* Only answer definitively once the feature-mask exchange is done */
2589 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2590 if (l2cap_check_security(sk)) {
2591 if (bt_sk(sk)->defer_setup) {
2592 sk->sk_state = BT_CONNECT2;
2593 result = L2CAP_CR_PEND;
2594 status = L2CAP_CS_AUTHOR_PEND;
2595 parent->sk_data_ready(parent, 0);
2596 } else {
2597 sk->sk_state = BT_CONFIG;
2598 result = L2CAP_CR_SUCCESS;
2599 status = L2CAP_CS_NO_INFO;
2601 } else {
2602 sk->sk_state = BT_CONNECT2;
2603 result = L2CAP_CR_PEND;
2604 status = L2CAP_CS_AUTHEN_PEND;
2606 } else {
2607 sk->sk_state = BT_CONNECT2;
2608 result = L2CAP_CR_PEND;
2609 status = L2CAP_CS_NO_INFO;
2612 write_unlock_bh(&list->lock);
2614 response:
2615 bh_unlock_sock(parent);
2617 sendresp:
2618 rsp.scid = cpu_to_le16(scid);
2619 rsp.dcid = cpu_to_le16(dcid);
2620 rsp.result = cpu_to_le16(result);
2621 rsp.status = cpu_to_le16(status);
2622 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: kick off the feature-mask information request */
2624 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2625 struct l2cap_info_req info;
2626 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2628 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2629 conn->info_ident = l2cap_get_ident(conn);
2631 mod_timer(&conn->info_timer, jiffies +
2632 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2634 l2cap_send_cmd(conn, conn->info_ident,
2635 L2CAP_INFO_REQ, sizeof(info), &info);
2638 return 0;
/* Handle an incoming Connection Response: look the channel up by our
 * scid (or by command ident when scid is 0), then either enter the
 * configuration phase and send our config request (success), mark the
 * connection as still pending, or tear the channel down. */
2641 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2643 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2644 u16 scid, dcid, result, status;
2645 struct sock *sk;
2646 u8 req[128];
2648 scid = __le16_to_cpu(rsp->scid);
2649 dcid = __le16_to_cpu(rsp->dcid);
2650 result = __le16_to_cpu(rsp->result);
2651 status = __le16_to_cpu(rsp->status);
2653 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* NOTE(review): the bh_unlock_sock(sk) below implies both lookup
 * helpers return with the socket locked — confirm */
2655 if (scid) {
2656 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2657 if (!sk)
2658 return 0;
2659 } else {
2660 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2661 if (!sk)
2662 return 0;
2665 switch (result) {
2666 case L2CAP_CR_SUCCESS:
2667 sk->sk_state = BT_CONFIG;
2668 l2cap_pi(sk)->ident = 0;
2669 l2cap_pi(sk)->dcid = dcid;
2670 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2672 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2674 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2675 l2cap_build_conf_req(sk, req), req);
2676 l2cap_pi(sk)->num_conf_req++;
2677 break;
2679 case L2CAP_CR_PEND:
2680 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2681 break;
2683 default:
2684 l2cap_chan_del(sk, ECONNREFUSED);
2685 break;
2688 bh_unlock_sock(sk);
2689 return 0;
/* Handle an incoming L2CAP Configuration Request.
 *
 * Configuration options may arrive fragmented across several requests
 * (continuation flag, bit 0 of 'flags'); fragments are accumulated in
 * conf_req/conf_len until the final fragment arrives, then parsed as one
 * unit.  When both directions have completed configuration the channel is
 * moved to BT_CONNECTED and the ERTM machinery is initialized.
 * Returns -ENOENT if the destination CID is unknown, otherwise 0.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the socket bh-locked on success. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unworkable options: give up on the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both sides configured: default to CRC16 FCS unless the
		 * peer explicitly negotiated no-FCS. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
				|| l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		/* Reset ERTM transmit-side state for the fresh channel. */
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_ack_seq = 0;
		l2cap_pi(sk)->unacked_frames = 0;

		setup_timer(&l2cap_pi(sk)->retrans_timer,
				l2cap_retrans_timeout, (unsigned long) sk);
		setup_timer(&l2cap_pi(sk)->monitor_timer,
				l2cap_monitor_timeout, (unsigned long) sk);

		__skb_queue_head_init(TX_QUEUE(sk));
		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We answered the peer but never sent our own request yet. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2782 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2784 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2785 u16 scid, flags, result;
2786 struct sock *sk;
2788 scid = __le16_to_cpu(rsp->scid);
2789 flags = __le16_to_cpu(rsp->flags);
2790 result = __le16_to_cpu(rsp->result);
2792 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2793 scid, flags, result);
2795 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2796 if (!sk)
2797 return 0;
2799 switch (result) {
2800 case L2CAP_CONF_SUCCESS:
2801 break;
2803 case L2CAP_CONF_UNACCEPT:
2804 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2805 int len = cmd->len - sizeof(*rsp);
2806 char req[64];
2808 /* throw out any old stored conf requests */
2809 result = L2CAP_CONF_SUCCESS;
2810 len = l2cap_parse_conf_rsp(sk, rsp->data,
2811 len, req, &result);
2812 if (len < 0) {
2813 l2cap_send_disconn_req(conn, sk);
2814 goto done;
2817 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2818 L2CAP_CONF_REQ, len, req);
2819 l2cap_pi(sk)->num_conf_req++;
2820 if (result != L2CAP_CONF_SUCCESS)
2821 goto done;
2822 break;
2825 default:
2826 sk->sk_state = BT_DISCONN;
2827 sk->sk_err = ECONNRESET;
2828 l2cap_sock_set_timer(sk, HZ * 5);
2829 l2cap_send_disconn_req(conn, sk);
2830 goto done;
2833 if (flags & 0x01)
2834 goto done;
2836 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2838 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2839 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2840 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2841 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2843 sk->sk_state = BT_CONNECTED;
2844 l2cap_pi(sk)->expected_tx_seq = 0;
2845 l2cap_pi(sk)->buffer_seq = 0;
2846 l2cap_pi(sk)->num_to_ack = 0;
2847 __skb_queue_head_init(TX_QUEUE(sk));
2848 __skb_queue_head_init(SREJ_QUEUE(sk));
2849 l2cap_chan_ready(sk);
2852 done:
2853 bh_unlock_sock(sk);
2854 return 0;
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response, stops ERTM timers, purges
 * pending queues and deletes the channel with ECONNRESET.  The socket is
 * killed after unlocking (l2cap_sock_kill may free it).  Returns 0 even
 * for an unknown CID.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local CID; returns with socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Drop any unsent/out-of-order data and stop retransmission. */
	skb_queue_purge(TX_QUEUE(sk));
	skb_queue_purge(SREJ_QUEUE(sk));
	del_timer(&l2cap_pi(sk)->retrans_timer);
	del_timer(&l2cap_pi(sk)->monitor_timer);

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Disconnection Response (peer confirmed our
 * disconnect request).  Mirrors l2cap_disconnect_req cleanup but deletes
 * the channel with no error.  Returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns with the socket bh-locked on success. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* Drop pending data and stop ERTM timers before teardown. */
	skb_queue_purge(TX_QUEUE(sk));
	skb_queue_purge(SREJ_QUEUE(sk));
	del_timer(&l2cap_pi(sk)->retrans_timer);
	del_timer(&l2cap_pi(sk)->monitor_timer);

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channel queries; any other type gets a
 * NOTSUPP response.  The reply buffers are sized as the l2cap_info_rsp
 * header (4 bytes) plus the payload (4-byte feature mask or 8-byte fixed
 * channel map).  Returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			/* Advertise ERTM/streaming/FCS only when enabled. */
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
								 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Fixed channel map goes right after the 4-byte header. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming L2CAP Information Response.
 *
 * Caches the remote feature mask; if the peer advertises fixed channels,
 * chains a fixed-channel query, otherwise marks the info exchange done
 * and kicks any channels that were waiting on it.  Returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* Response arrived in time: cancel the info-request timeout. */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Dispatch all signalling commands contained in one C-frame.
 *
 * A single signalling skb may carry several commands back to back; each
 * is validated (length within remaining data, non-zero ident) and routed
 * to its handler.  Any handler error is answered with a Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			/* Truncated payload or invalid ident: stop parsing. */
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3087 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3089 u16 our_fcs, rcv_fcs;
3090 int hdr_size = L2CAP_HDR_SIZE + 2;
3092 if (pi->fcs == L2CAP_FCS_CRC16) {
3093 skb_trim(skb, skb->len - 2);
3094 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3095 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3097 if (our_fcs != rcv_fcs)
3098 return -EINVAL;
3100 return 0;
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq so reassembly can drain it in order.
 */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequence number and SAR bits in the skb control block. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	/* Walk until we find the first frame with a larger sequence. */
	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest sequence seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
3130 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3132 struct l2cap_pinfo *pi = l2cap_pi(sk);
3133 struct sk_buff *_skb;
3134 int err = -EINVAL;
3136 switch (control & L2CAP_CTRL_SAR) {
3137 case L2CAP_SDU_UNSEGMENTED:
3138 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3139 kfree_skb(pi->sdu);
3140 break;
3143 err = sock_queue_rcv_skb(sk, skb);
3144 if (!err)
3145 return 0;
3147 break;
3149 case L2CAP_SDU_START:
3150 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3151 kfree_skb(pi->sdu);
3152 break;
3155 pi->sdu_len = get_unaligned_le16(skb->data);
3156 skb_pull(skb, 2);
3158 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3159 if (!pi->sdu) {
3160 err = -ENOMEM;
3161 break;
3164 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3166 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3167 pi->partial_sdu_len = skb->len;
3168 err = 0;
3169 break;
3171 case L2CAP_SDU_CONTINUE:
3172 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3173 break;
3175 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3177 pi->partial_sdu_len += skb->len;
3178 if (pi->partial_sdu_len > pi->sdu_len)
3179 kfree_skb(pi->sdu);
3180 else
3181 err = 0;
3183 break;
3185 case L2CAP_SDU_END:
3186 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3187 break;
3189 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3191 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3192 pi->partial_sdu_len += skb->len;
3194 if (pi->partial_sdu_len == pi->sdu_len) {
3195 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3196 err = sock_queue_rcv_skb(sk, _skb);
3197 if (err < 0)
3198 kfree_skb(_skb);
3200 kfree_skb(pi->sdu);
3201 err = 0;
3203 break;
3206 kfree_skb(skb);
3207 return err;
/* Drain the SREJ queue starting at tx_seq, delivering every frame whose
 * sequence number is now contiguous and advancing buffer_seq_srej (mod
 * 64, the ERTM sequence space) for each one.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control = 0;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		if (bt_cb(skb)->tx_seq != tx_seq)
			/* Gap not yet filled: stop here. */
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		/* Rebuild the SAR bits for the reassembly layer. */
		control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_sar_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
/* A frame we SREJ'd earlier finally arrived (tx_seq).  Remove its entry
 * from the pending SREJ list; every entry encountered before it is
 * re-requested with a fresh SREJ S-frame and rotated to the list tail.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request: drop it and stop. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		/* Move to the tail so list order tracks request order. */
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3248 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3250 struct l2cap_pinfo *pi = l2cap_pi(sk);
3251 struct srej_list *new;
3252 u16 control;
3254 while (tx_seq != pi->expected_tx_seq) {
3255 control = L2CAP_SUPER_SELECT_REJECT;
3256 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3257 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3258 control |= L2CAP_CTRL_POLL;
3259 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3261 l2cap_send_sframe(pi, control);
3263 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3264 new->tx_seq = pi->expected_tx_seq++;
3265 list_add_tail(&new->list, SREJ_LIST(sk));
3267 pi->expected_tx_seq++;
/* Process one received ERTM I-frame.
 *
 * In-sequence frames go straight to SDU reassembly (or to the SREJ queue
 * while a recovery is in progress).  Out-of-sequence frames either extend
 * an ongoing SREJ recovery or start a new one.  Every L2CAP_DEFAULT_NUM_TO_ACK
 * in-sequence frames an RR acknowledgement is sent.  Sequence numbers are
 * modulo 64.  Returns 0 or a negative reassembly error.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: queue it and
			 * deliver any now-contiguous run. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete: resync buffer_seq. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* Already requested?  Then re-issue outstanding
			 * SREJs instead of adding a duplicate. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Mid-recovery: buffer even in-sequence frames. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Periodic acknowledgement of received I-frames. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
3349 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3351 struct l2cap_pinfo *pi = l2cap_pi(sk);
3352 u8 tx_seq = __get_reqseq(rx_control);
3354 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3356 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3357 case L2CAP_SUPER_RCV_READY:
3358 if (rx_control & L2CAP_CTRL_POLL) {
3359 u16 control = L2CAP_CTRL_FINAL;
3360 control |= L2CAP_SUPER_RCV_READY |
3361 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3362 l2cap_send_sframe(l2cap_pi(sk), control);
3363 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3365 } else if (rx_control & L2CAP_CTRL_FINAL) {
3366 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3367 pi->expected_ack_seq = tx_seq;
3368 l2cap_drop_acked_frames(sk);
3370 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3371 break;
3373 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3374 del_timer(&pi->monitor_timer);
3376 if (pi->unacked_frames > 0)
3377 __mod_retrans_timer();
3378 } else {
3379 pi->expected_ack_seq = tx_seq;
3380 l2cap_drop_acked_frames(sk);
3382 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3383 && (pi->unacked_frames > 0))
3384 __mod_retrans_timer();
3386 l2cap_ertm_send(sk);
3387 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3389 break;
3391 case L2CAP_SUPER_REJECT:
3392 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3394 pi->expected_ack_seq = __get_reqseq(rx_control);
3395 l2cap_drop_acked_frames(sk);
3397 sk->sk_send_head = TX_QUEUE(sk)->next;
3398 pi->next_tx_seq = pi->expected_ack_seq;
3400 l2cap_ertm_send(sk);
3402 break;
3404 case L2CAP_SUPER_SELECT_REJECT:
3405 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3407 if (rx_control & L2CAP_CTRL_POLL) {
3408 l2cap_retransmit_frame(sk, tx_seq);
3409 pi->expected_ack_seq = tx_seq;
3410 l2cap_drop_acked_frames(sk);
3411 l2cap_ertm_send(sk);
3412 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3413 pi->srej_save_reqseq = tx_seq;
3414 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3416 } else if (rx_control & L2CAP_CTRL_FINAL) {
3417 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3418 pi->srej_save_reqseq == tx_seq)
3419 pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT;
3420 else
3421 l2cap_retransmit_frame(sk, tx_seq);
3423 else {
3424 l2cap_retransmit_frame(sk, tx_seq);
3425 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3426 pi->srej_save_reqseq = tx_seq;
3427 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3430 break;
3432 case L2CAP_SUPER_RCV_NOT_READY:
3433 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3434 pi->expected_ack_seq = tx_seq;
3435 l2cap_drop_acked_frames(sk);
3437 del_timer(&l2cap_pi(sk)->retrans_timer);
3438 if (rx_control & L2CAP_CTRL_POLL) {
3439 u16 control = L2CAP_CTRL_FINAL;
3440 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3442 break;
3445 return 0;
/* Entry point for data frames on a connection-oriented channel (CID).
 *
 * Looks up the channel, then dispatches by channel mode: basic mode
 * queues the payload directly; ERTM strips the control field, validates
 * length and FCS, and hands I-/S-frames to the state machine; streaming
 * mode does the same but tolerates loss by resynchronizing expected_tx_seq.
 * Frames failing any check are dropped.  Returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	/* Returns with the socket bh-locked on success. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* SDU-start frames carry a 2-byte SDU length prefix. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries no S-frames. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Lost frames are simply skipped over, never recovered. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (G-frame) payload to the socket bound to the
 * given PSM on our local address.  Dropped when no matching socket, the
 * socket is in the wrong state, or the payload exceeds its MTU.
 * Returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	/* Returns with the socket bh-locked on success. */
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Route one complete, reassembled L2CAP frame by channel ID: signalling
 * channel, connectionless channel (PSM-prefixed), or a data channel.
 * Frames whose header length disagrees with the skb length are dropped.
 * Consumes the skb (directly or via the channel handlers).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the target PSM. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3619 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 *
 * Scans listening L2CAP sockets; a socket bound to this adapter's address
 * takes precedence (lm1/exact) over wildcard-bound sockets (lm2).  The
 * returned link-mode mask tells HCI whether to accept and whether to
 * request the master role.  Non-ACL links are ignored.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3654 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3656 struct l2cap_conn *conn;
3658 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3660 if (hcon->type != ACL_LINK)
3661 return 0;
3663 if (!status) {
3664 conn = l2cap_conn_add(hcon, status);
3665 if (conn)
3666 l2cap_conn_ready(conn);
3667 } else
3668 l2cap_conn_del(hcon, bt_err(status));
3670 return 0;
/* HCI callback: supply the disconnect reason for a closing link.
 * Returns the reason recorded on the L2CAP connection, or 0x13
 * ("remote user terminated") when there is no L2CAP state to consult.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
/* HCI callback: the ACL link is gone.  Tear down the L2CAP connection
 * and all of its channels, mapping the HCI reason to an errno.
 * Non-ACL links are ignored.  Always returns 0.
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return 0;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
/* React to an encryption change on a connected SEQPACKET channel.
 *
 * Encryption dropped: medium-security channels get a 5s grace timer
 * (re-encryption may follow), high-security channels are closed at once.
 * Encryption (re)established: cancel a pending medium-security timer.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	if (sk->sk_type != SOCK_SEQPACKET)
		return;

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
	} else {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
	}
}
/* HCI callback: authentication/encryption procedure completed.
 *
 * Walks every channel on the connection: connected/configuring channels
 * get an encryption check; channels blocked in BT_CONNECT now send their
 * Connection Request (or are timed out on failure); channels in
 * BT_CONNECT2 (we deferred our answer pending security) finally send the
 * Connection Response.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			/* Still waiting on the peer; nothing to do yet. */
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to clean up. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: ACL data arrived, possibly fragmented.
 *
 * ACL_START fragments carry the L2CAP header announcing the total frame
 * length; if the fragment is already complete it is dispatched directly,
 * otherwise an rx_skb is allocated and continuation fragments are
 * appended until rx_len reaches zero.  Malformed fragment sequences mark
 * the connection unreliable and the fragment is dropped.  Returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* Previous frame never finished: discard it. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			/* Not even a complete length field. */
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* Continuation with no frame in progress. */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
/* sysfs read handler: dump one line per L2CAP socket with addresses,
 * state, PSM, CIDs, MTUs and security level.  Returns the number of
 * bytes written into buf.
 * NOTE(review): unbounded sprintf into a single sysfs page — with many
 * sockets this can overrun PAGE_SIZE; confirm against newer seq_file
 * based implementations.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return str - buf;
}

/* Read-only class attribute exposing the socket list above. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Registered with the Bluetooth socket layer to create L2CAP sockets. */
static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Hooks registered with the HCI core for link and data events. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks, then expose the sysfs info file.  Registrations
 * are unwound in reverse order on failure.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* The info file is best-effort; failure is not fatal. */
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
/* Module exit: undo everything l2cap_init registered, in reverse. */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
	return;
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime switch for enhanced retransmission mode support. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Auto-load via the bt-proto-0 (BTPROTO_L2CAP) alias. */
MODULE_ALIAS("bt-proto-0");