/* Web-viewer scrape residue (unrelated commit subject, repo path and blob id)
 * retained for provenance:
 *   ocfs2: Fix contiguousness check in ocfs2_try_to_merge_extent_map()
 *   [linux-2.6/x86.git] / net / bluetooth / l2cap.c
 *   blob 1120cf14a5484b0bd9ac3ba331874e81121c1182
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
56 static int enable_ertm = 0;
57 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
59 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
60 static u8 l2cap_fixed_chan[8] = { 0x02, };
62 static const struct proto_ops l2cap_sock_ops;
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
/* Socket timer expiry: pick an error based on how far connection setup
 * had progressed, then close and possibly free the socket. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill only fires if the socket is zapped and orphaned. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
101 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
103 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
104 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
107 static void l2cap_sock_clear_timer(struct sock *sk)
109 BT_DBG("sock %p state %d", sk, sk->sk_state);
110 sk_stop_timer(sk, &sk->sk_timer);
113 /* ---- L2CAP channels ---- */
114 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 struct sock *s;
117 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
118 if (l2cap_pi(s)->dcid == cid)
119 break;
121 return s;
124 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->scid == cid)
129 break;
131 return s;
134 /* Find channel with given SCID.
135 * Returns locked socket */
136 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 struct sock *s;
139 read_lock(&l->lock);
140 s = __l2cap_get_chan_by_scid(l, cid);
141 if (s)
142 bh_lock_sock(s);
143 read_unlock(&l->lock);
144 return s;
147 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 struct sock *s;
150 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
151 if (l2cap_pi(s)->ident == ident)
152 break;
154 return s;
157 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 struct sock *s;
160 read_lock(&l->lock);
161 s = __l2cap_get_chan_by_ident(l, ident);
162 if (s)
163 bh_lock_sock(s);
164 read_unlock(&l->lock);
165 return s;
168 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
170 u16 cid = L2CAP_CID_DYN_START;
172 for (; cid < L2CAP_CID_DYN_END; cid++) {
173 if (!__l2cap_get_chan_by_scid(l, cid))
174 return cid;
177 return 0;
180 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
182 sock_hold(sk);
184 if (l->head)
185 l2cap_pi(l->head)->prev_c = sk;
187 l2cap_pi(sk)->next_c = l->head;
188 l2cap_pi(sk)->prev_c = NULL;
189 l->head = sk;
192 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
194 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
196 write_lock_bh(&l->lock);
197 if (sk == l->head)
198 l->head = next;
200 if (next)
201 l2cap_pi(next)->prev_c = prev;
202 if (prev)
203 l2cap_pi(prev)->next_c = next;
204 write_unlock_bh(&l->lock);
206 __sock_put(sk);
209 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
211 struct l2cap_chan_list *l = &conn->chan_list;
213 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
214 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
216 conn->disc_reason = 0x13;
218 l2cap_pi(sk)->conn = conn;
220 if (sk->sk_type == SOCK_SEQPACKET) {
221 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
223 } else if (sk->sk_type == SOCK_DGRAM) {
224 /* Connectionless socket */
225 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
227 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 } else {
229 /* Raw socket can send/recv signalling messages only */
230 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 __l2cap_chan_link(l, sk);
237 if (parent)
238 bt_accept_enqueue(parent, sk);
241 /* Delete channel.
242 * Must be called on the locked socket. */
243 static void l2cap_chan_del(struct sock *sk, int err)
245 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
246 struct sock *parent = bt_sk(sk)->parent;
248 l2cap_sock_clear_timer(sk);
250 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 if (conn) {
253 /* Unlink from channel list */
254 l2cap_chan_unlink(&conn->chan_list, sk);
255 l2cap_pi(sk)->conn = NULL;
256 hci_conn_put(conn->hcon);
259 sk->sk_state = BT_CLOSED;
260 sock_set_flag(sk, SOCK_ZAPPED);
262 if (err)
263 sk->sk_err = err;
265 if (parent) {
266 bt_accept_unlink(sk);
267 parent->sk_data_ready(parent, 0);
268 } else
269 sk->sk_state_change(sk);
272 /* Service level security */
273 static inline int l2cap_check_security(struct sock *sk)
275 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
276 __u8 auth_type;
278 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
279 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
280 auth_type = HCI_AT_NO_BONDING_MITM;
281 else
282 auth_type = HCI_AT_NO_BONDING;
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
285 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
286 } else {
287 switch (l2cap_pi(sk)->sec_level) {
288 case BT_SECURITY_HIGH:
289 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 break;
291 case BT_SECURITY_MEDIUM:
292 auth_type = HCI_AT_GENERAL_BONDING;
293 break;
294 default:
295 auth_type = HCI_AT_NO_BONDING;
296 break;
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
301 auth_type);
304 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 u8 id;
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn->lock);
316 if (++conn->tx_ident > 128)
317 conn->tx_ident = 1;
319 id = conn->tx_ident;
321 spin_unlock_bh(&conn->lock);
323 return id;
326 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
330 BT_DBG("code 0x%2.2x", code);
332 if (!skb)
333 return -ENOMEM;
335 return hci_send_acl(conn->hcon, skb, 0);
338 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct sk_buff *skb;
341 struct l2cap_hdr *lh;
342 struct l2cap_conn *conn = pi->conn;
343 int count, hlen = L2CAP_HDR_SIZE + 2;
345 if (pi->fcs == L2CAP_FCS_CRC16)
346 hlen += 2;
348 BT_DBG("pi %p, control 0x%2.2x", pi, control);
350 count = min_t(unsigned int, conn->mtu, hlen);
351 control |= L2CAP_CTRL_FRAME_TYPE;
353 skb = bt_skb_alloc(count, GFP_ATOMIC);
354 if (!skb)
355 return -ENOMEM;
357 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
358 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
359 lh->cid = cpu_to_le16(pi->dcid);
360 put_unaligned_le16(control, skb_put(skb, 2));
362 if (pi->fcs == L2CAP_FCS_CRC16) {
363 u16 fcs = crc16(0, (u8 *)lh, count - 2);
364 put_unaligned_le16(fcs, skb_put(skb, 2));
367 return hci_send_acl(pi->conn->hcon, skb, 0);
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
372 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
373 control |= L2CAP_SUPER_RCV_NOT_READY;
374 else
375 control |= L2CAP_SUPER_RCV_READY;
377 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
379 return l2cap_send_sframe(pi, control);
382 static void l2cap_do_start(struct sock *sk)
384 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
386 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
387 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
388 return;
390 if (l2cap_check_security(sk)) {
391 struct l2cap_conn_req req;
392 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
393 req.psm = l2cap_pi(sk)->psm;
395 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
397 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
398 L2CAP_CONN_REQ, sizeof(req), &req);
400 } else {
401 struct l2cap_info_req req;
402 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
405 conn->info_ident = l2cap_get_ident(conn);
407 mod_timer(&conn->info_timer, jiffies +
408 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
410 l2cap_send_cmd(conn, conn->info_ident,
411 L2CAP_INFO_REQ, sizeof(req), &req);
415 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
417 struct l2cap_disconn_req req;
419 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
420 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
421 l2cap_send_cmd(conn, l2cap_get_ident(conn),
422 L2CAP_DISCONN_REQ, sizeof(req), &req);
425 /* ---- L2CAP connections ---- */
426 static void l2cap_conn_start(struct l2cap_conn *conn)
428 struct l2cap_chan_list *l = &conn->chan_list;
429 struct sock *sk;
431 BT_DBG("conn %p", conn);
433 read_lock(&l->lock);
435 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
436 bh_lock_sock(sk);
438 if (sk->sk_type != SOCK_SEQPACKET) {
439 bh_unlock_sock(sk);
440 continue;
443 if (sk->sk_state == BT_CONNECT) {
444 if (l2cap_check_security(sk)) {
445 struct l2cap_conn_req req;
446 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
447 req.psm = l2cap_pi(sk)->psm;
449 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
451 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
452 L2CAP_CONN_REQ, sizeof(req), &req);
454 } else if (sk->sk_state == BT_CONNECT2) {
455 struct l2cap_conn_rsp rsp;
456 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
457 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
459 if (l2cap_check_security(sk)) {
460 if (bt_sk(sk)->defer_setup) {
461 struct sock *parent = bt_sk(sk)->parent;
462 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
463 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
464 parent->sk_data_ready(parent, 0);
466 } else {
467 sk->sk_state = BT_CONFIG;
468 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
469 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 } else {
472 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
473 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
480 bh_unlock_sock(sk);
483 read_unlock(&l->lock);
486 static void l2cap_conn_ready(struct l2cap_conn *conn)
488 struct l2cap_chan_list *l = &conn->chan_list;
489 struct sock *sk;
491 BT_DBG("conn %p", conn);
493 read_lock(&l->lock);
495 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
496 bh_lock_sock(sk);
498 if (sk->sk_type != SOCK_SEQPACKET) {
499 l2cap_sock_clear_timer(sk);
500 sk->sk_state = BT_CONNECTED;
501 sk->sk_state_change(sk);
502 } else if (sk->sk_state == BT_CONNECT)
503 l2cap_do_start(sk);
505 bh_unlock_sock(sk);
508 read_unlock(&l->lock);
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
514 struct l2cap_chan_list *l = &conn->chan_list;
515 struct sock *sk;
517 BT_DBG("conn %p", conn);
519 read_lock(&l->lock);
521 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
522 if (l2cap_pi(sk)->force_reliable)
523 sk->sk_err = err;
526 read_unlock(&l->lock);
529 static void l2cap_info_timeout(unsigned long arg)
531 struct l2cap_conn *conn = (void *) arg;
533 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
534 conn->info_ident = 0;
536 l2cap_conn_start(conn);
539 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
541 struct l2cap_conn *conn = hcon->l2cap_data;
543 if (conn || status)
544 return conn;
546 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
547 if (!conn)
548 return NULL;
550 hcon->l2cap_data = conn;
551 conn->hcon = hcon;
553 BT_DBG("hcon %p conn %p", hcon, conn);
555 conn->mtu = hcon->hdev->acl_mtu;
556 conn->src = &hcon->hdev->bdaddr;
557 conn->dst = &hcon->dst;
559 conn->feat_mask = 0;
561 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock);
564 setup_timer(&conn->info_timer, l2cap_info_timeout,
565 (unsigned long) conn);
567 conn->disc_reason = 0x13;
569 return conn;
572 static void l2cap_conn_del(struct hci_conn *hcon, int err)
574 struct l2cap_conn *conn = hcon->l2cap_data;
575 struct sock *sk;
577 if (!conn)
578 return;
580 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
582 kfree_skb(conn->rx_skb);
584 /* Kill channels */
585 while ((sk = conn->chan_list.head)) {
586 bh_lock_sock(sk);
587 l2cap_chan_del(sk, err);
588 bh_unlock_sock(sk);
589 l2cap_sock_kill(sk);
592 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
593 del_timer_sync(&conn->info_timer);
595 hcon->l2cap_data = NULL;
596 kfree(conn);
599 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
601 struct l2cap_chan_list *l = &conn->chan_list;
602 write_lock_bh(&l->lock);
603 __l2cap_chan_add(conn, sk, parent);
604 write_unlock_bh(&l->lock);
607 /* ---- Socket interface ---- */
608 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
610 struct sock *sk;
611 struct hlist_node *node;
612 sk_for_each(sk, node, &l2cap_sk_list.head)
613 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
614 goto found;
615 sk = NULL;
616 found:
617 return sk;
620 /* Find socket with psm and source bdaddr.
621 * Returns closest match.
623 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
625 struct sock *sk = NULL, *sk1 = NULL;
626 struct hlist_node *node;
628 sk_for_each(sk, node, &l2cap_sk_list.head) {
629 if (state && sk->sk_state != state)
630 continue;
632 if (l2cap_pi(sk)->psm == psm) {
633 /* Exact match. */
634 if (!bacmp(&bt_sk(sk)->src, src))
635 break;
637 /* Closest match */
638 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
639 sk1 = sk;
642 return node ? sk : sk1;
645 /* Find socket with given address (psm, src).
646 * Returns locked socket */
647 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 struct sock *s;
650 read_lock(&l2cap_sk_list.lock);
651 s = __l2cap_get_sock_by_psm(state, psm, src);
652 if (s)
653 bh_lock_sock(s);
654 read_unlock(&l2cap_sk_list.lock);
655 return s;
658 static void l2cap_sock_destruct(struct sock *sk)
660 BT_DBG("sk %p", sk);
662 skb_queue_purge(&sk->sk_receive_queue);
663 skb_queue_purge(&sk->sk_write_queue);
666 static void l2cap_sock_cleanup_listen(struct sock *parent)
668 struct sock *sk;
670 BT_DBG("parent %p", parent);
672 /* Close not yet accepted channels */
673 while ((sk = bt_accept_dequeue(parent, NULL)))
674 l2cap_sock_close(sk);
676 parent->sk_state = BT_CLOSED;
677 sock_set_flag(parent, SOCK_ZAPPED);
680 /* Kill socket (only if zapped and orphan)
681 * Must be called on unlocked socket.
683 static void l2cap_sock_kill(struct sock *sk)
685 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
686 return;
688 BT_DBG("sk %p state %d", sk, sk->sk_state);
690 /* Kill poor orphan */
691 bt_sock_unlink(&l2cap_sk_list, sk);
692 sock_set_flag(sk, SOCK_DEAD);
693 sock_put(sk);
696 static void __l2cap_sock_close(struct sock *sk, int reason)
698 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
700 switch (sk->sk_state) {
701 case BT_LISTEN:
702 l2cap_sock_cleanup_listen(sk);
703 break;
705 case BT_CONNECTED:
706 case BT_CONFIG:
707 if (sk->sk_type == SOCK_SEQPACKET) {
708 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
710 sk->sk_state = BT_DISCONN;
711 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
712 l2cap_send_disconn_req(conn, sk);
713 } else
714 l2cap_chan_del(sk, reason);
715 break;
717 case BT_CONNECT2:
718 if (sk->sk_type == SOCK_SEQPACKET) {
719 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
720 struct l2cap_conn_rsp rsp;
721 __u16 result;
723 if (bt_sk(sk)->defer_setup)
724 result = L2CAP_CR_SEC_BLOCK;
725 else
726 result = L2CAP_CR_BAD_PSM;
728 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
729 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
730 rsp.result = cpu_to_le16(result);
731 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
732 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
733 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
734 } else
735 l2cap_chan_del(sk, reason);
736 break;
738 case BT_CONNECT:
739 case BT_DISCONN:
740 l2cap_chan_del(sk, reason);
741 break;
743 default:
744 sock_set_flag(sk, SOCK_ZAPPED);
745 break;
749 /* Must be called on unlocked socket. */
750 static void l2cap_sock_close(struct sock *sk)
752 l2cap_sock_clear_timer(sk);
753 lock_sock(sk);
754 __l2cap_sock_close(sk, ECONNRESET);
755 release_sock(sk);
756 l2cap_sock_kill(sk);
759 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
761 struct l2cap_pinfo *pi = l2cap_pi(sk);
763 BT_DBG("sk %p", sk);
765 if (parent) {
766 sk->sk_type = parent->sk_type;
767 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
769 pi->imtu = l2cap_pi(parent)->imtu;
770 pi->omtu = l2cap_pi(parent)->omtu;
771 pi->mode = l2cap_pi(parent)->mode;
772 pi->fcs = l2cap_pi(parent)->fcs;
773 pi->sec_level = l2cap_pi(parent)->sec_level;
774 pi->role_switch = l2cap_pi(parent)->role_switch;
775 pi->force_reliable = l2cap_pi(parent)->force_reliable;
776 } else {
777 pi->imtu = L2CAP_DEFAULT_MTU;
778 pi->omtu = 0;
779 pi->mode = L2CAP_MODE_BASIC;
780 pi->fcs = L2CAP_FCS_CRC16;
781 pi->sec_level = BT_SECURITY_LOW;
782 pi->role_switch = 0;
783 pi->force_reliable = 0;
786 /* Default config options */
787 pi->conf_len = 0;
788 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
789 skb_queue_head_init(TX_QUEUE(sk));
790 skb_queue_head_init(SREJ_QUEUE(sk));
791 INIT_LIST_HEAD(SREJ_LIST(sk));
794 static struct proto l2cap_proto = {
795 .name = "L2CAP",
796 .owner = THIS_MODULE,
797 .obj_size = sizeof(struct l2cap_pinfo)
800 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
802 struct sock *sk;
804 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
805 if (!sk)
806 return NULL;
808 sock_init_data(sock, sk);
809 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
811 sk->sk_destruct = l2cap_sock_destruct;
812 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
814 sock_reset_flag(sk, SOCK_ZAPPED);
816 sk->sk_protocol = proto;
817 sk->sk_state = BT_OPEN;
819 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
821 bt_sock_link(&l2cap_sk_list, sk);
822 return sk;
825 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
826 int kern)
828 struct sock *sk;
830 BT_DBG("sock %p", sock);
832 sock->state = SS_UNCONNECTED;
834 if (sock->type != SOCK_SEQPACKET &&
835 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
836 return -ESOCKTNOSUPPORT;
838 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
839 return -EPERM;
841 sock->ops = &l2cap_sock_ops;
843 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
844 if (!sk)
845 return -ENOMEM;
847 l2cap_sock_init(sk, NULL);
848 return 0;
851 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
853 struct sock *sk = sock->sk;
854 struct sockaddr_l2 la;
855 int len, err = 0;
857 BT_DBG("sk %p", sk);
859 if (!addr || addr->sa_family != AF_BLUETOOTH)
860 return -EINVAL;
862 memset(&la, 0, sizeof(la));
863 len = min_t(unsigned int, sizeof(la), alen);
864 memcpy(&la, addr, len);
866 if (la.l2_cid)
867 return -EINVAL;
869 lock_sock(sk);
871 if (sk->sk_state != BT_OPEN) {
872 err = -EBADFD;
873 goto done;
876 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
877 !capable(CAP_NET_BIND_SERVICE)) {
878 err = -EACCES;
879 goto done;
882 write_lock_bh(&l2cap_sk_list.lock);
884 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
885 err = -EADDRINUSE;
886 } else {
887 /* Save source address */
888 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
889 l2cap_pi(sk)->psm = la.l2_psm;
890 l2cap_pi(sk)->sport = la.l2_psm;
891 sk->sk_state = BT_BOUND;
893 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
894 __le16_to_cpu(la.l2_psm) == 0x0003)
895 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
898 write_unlock_bh(&l2cap_sk_list.lock);
900 done:
901 release_sock(sk);
902 return err;
905 static int l2cap_do_connect(struct sock *sk)
907 bdaddr_t *src = &bt_sk(sk)->src;
908 bdaddr_t *dst = &bt_sk(sk)->dst;
909 struct l2cap_conn *conn;
910 struct hci_conn *hcon;
911 struct hci_dev *hdev;
912 __u8 auth_type;
913 int err;
915 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
916 l2cap_pi(sk)->psm);
918 hdev = hci_get_route(dst, src);
919 if (!hdev)
920 return -EHOSTUNREACH;
922 hci_dev_lock_bh(hdev);
924 err = -ENOMEM;
926 if (sk->sk_type == SOCK_RAW) {
927 switch (l2cap_pi(sk)->sec_level) {
928 case BT_SECURITY_HIGH:
929 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
930 break;
931 case BT_SECURITY_MEDIUM:
932 auth_type = HCI_AT_DEDICATED_BONDING;
933 break;
934 default:
935 auth_type = HCI_AT_NO_BONDING;
936 break;
938 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
939 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
940 auth_type = HCI_AT_NO_BONDING_MITM;
941 else
942 auth_type = HCI_AT_NO_BONDING;
944 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
945 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
946 } else {
947 switch (l2cap_pi(sk)->sec_level) {
948 case BT_SECURITY_HIGH:
949 auth_type = HCI_AT_GENERAL_BONDING_MITM;
950 break;
951 case BT_SECURITY_MEDIUM:
952 auth_type = HCI_AT_GENERAL_BONDING;
953 break;
954 default:
955 auth_type = HCI_AT_NO_BONDING;
956 break;
960 hcon = hci_connect(hdev, ACL_LINK, dst,
961 l2cap_pi(sk)->sec_level, auth_type);
962 if (!hcon)
963 goto done;
965 conn = l2cap_conn_add(hcon, 0);
966 if (!conn) {
967 hci_conn_put(hcon);
968 goto done;
971 err = 0;
973 /* Update source addr of the socket */
974 bacpy(src, conn->src);
976 l2cap_chan_add(conn, sk, NULL);
978 sk->sk_state = BT_CONNECT;
979 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
981 if (hcon->state == BT_CONNECTED) {
982 if (sk->sk_type != SOCK_SEQPACKET) {
983 l2cap_sock_clear_timer(sk);
984 sk->sk_state = BT_CONNECTED;
985 } else
986 l2cap_do_start(sk);
989 done:
990 hci_dev_unlock_bh(hdev);
991 hci_dev_put(hdev);
992 return err;
995 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
997 struct sock *sk = sock->sk;
998 struct sockaddr_l2 la;
999 int len, err = 0;
1001 BT_DBG("sk %p", sk);
1003 if (!addr || addr->sa_family != AF_BLUETOOTH)
1004 return -EINVAL;
1006 memset(&la, 0, sizeof(la));
1007 len = min_t(unsigned int, sizeof(la), alen);
1008 memcpy(&la, addr, len);
1010 if (la.l2_cid)
1011 return -EINVAL;
1013 lock_sock(sk);
1015 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1016 err = -EINVAL;
1017 goto done;
1020 switch (l2cap_pi(sk)->mode) {
1021 case L2CAP_MODE_BASIC:
1022 break;
1023 case L2CAP_MODE_ERTM:
1024 case L2CAP_MODE_STREAMING:
1025 if (enable_ertm)
1026 break;
1027 /* fall through */
1028 default:
1029 err = -ENOTSUPP;
1030 goto done;
1033 switch (sk->sk_state) {
1034 case BT_CONNECT:
1035 case BT_CONNECT2:
1036 case BT_CONFIG:
1037 /* Already connecting */
1038 goto wait;
1040 case BT_CONNECTED:
1041 /* Already connected */
1042 goto done;
1044 case BT_OPEN:
1045 case BT_BOUND:
1046 /* Can connect */
1047 break;
1049 default:
1050 err = -EBADFD;
1051 goto done;
1054 /* Set destination address and psm */
1055 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1056 l2cap_pi(sk)->psm = la.l2_psm;
1058 err = l2cap_do_connect(sk);
1059 if (err)
1060 goto done;
1062 wait:
1063 err = bt_sock_wait_state(sk, BT_CONNECTED,
1064 sock_sndtimeo(sk, flags & O_NONBLOCK));
1065 done:
1066 release_sock(sk);
1067 return err;
1070 static int l2cap_sock_listen(struct socket *sock, int backlog)
1072 struct sock *sk = sock->sk;
1073 int err = 0;
1075 BT_DBG("sk %p backlog %d", sk, backlog);
1077 lock_sock(sk);
1079 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1080 err = -EBADFD;
1081 goto done;
1084 switch (l2cap_pi(sk)->mode) {
1085 case L2CAP_MODE_BASIC:
1086 break;
1087 case L2CAP_MODE_ERTM:
1088 case L2CAP_MODE_STREAMING:
1089 if (enable_ertm)
1090 break;
1091 /* fall through */
1092 default:
1093 err = -ENOTSUPP;
1094 goto done;
1097 if (!l2cap_pi(sk)->psm) {
1098 bdaddr_t *src = &bt_sk(sk)->src;
1099 u16 psm;
1101 err = -EINVAL;
1103 write_lock_bh(&l2cap_sk_list.lock);
1105 for (psm = 0x1001; psm < 0x1100; psm += 2)
1106 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1107 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1108 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1109 err = 0;
1110 break;
1113 write_unlock_bh(&l2cap_sk_list.lock);
1115 if (err < 0)
1116 goto done;
1119 sk->sk_max_ack_backlog = backlog;
1120 sk->sk_ack_backlog = 0;
1121 sk->sk_state = BT_LISTEN;
1123 done:
1124 release_sock(sk);
1125 return err;
1128 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1130 DECLARE_WAITQUEUE(wait, current);
1131 struct sock *sk = sock->sk, *nsk;
1132 long timeo;
1133 int err = 0;
1135 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1137 if (sk->sk_state != BT_LISTEN) {
1138 err = -EBADFD;
1139 goto done;
1142 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1144 BT_DBG("sk %p timeo %ld", sk, timeo);
1146 /* Wait for an incoming connection. (wake-one). */
1147 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1148 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1149 set_current_state(TASK_INTERRUPTIBLE);
1150 if (!timeo) {
1151 err = -EAGAIN;
1152 break;
1155 release_sock(sk);
1156 timeo = schedule_timeout(timeo);
1157 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1159 if (sk->sk_state != BT_LISTEN) {
1160 err = -EBADFD;
1161 break;
1164 if (signal_pending(current)) {
1165 err = sock_intr_errno(timeo);
1166 break;
1169 set_current_state(TASK_RUNNING);
1170 remove_wait_queue(sk->sk_sleep, &wait);
1172 if (err)
1173 goto done;
1175 newsock->state = SS_CONNECTED;
1177 BT_DBG("new socket %p", nsk);
1179 done:
1180 release_sock(sk);
1181 return err;
1184 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1186 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1187 struct sock *sk = sock->sk;
1189 BT_DBG("sock %p, sk %p", sock, sk);
1191 addr->sa_family = AF_BLUETOOTH;
1192 *len = sizeof(struct sockaddr_l2);
1194 if (peer) {
1195 la->l2_psm = l2cap_pi(sk)->psm;
1196 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1197 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1198 } else {
1199 la->l2_psm = l2cap_pi(sk)->sport;
1200 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1201 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1204 return 0;
1207 static void l2cap_monitor_timeout(unsigned long arg)
1209 struct sock *sk = (void *) arg;
1210 u16 control;
1212 bh_lock_sock(sk);
1213 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1214 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1215 bh_unlock_sock(sk);
1216 return;
1219 l2cap_pi(sk)->retry_count++;
1220 __mod_monitor_timer();
1222 control = L2CAP_CTRL_POLL;
1223 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1224 bh_unlock_sock(sk);
1227 static void l2cap_retrans_timeout(unsigned long arg)
1229 struct sock *sk = (void *) arg;
1230 u16 control;
1232 bh_lock_sock(sk);
1233 l2cap_pi(sk)->retry_count = 1;
1234 __mod_monitor_timer();
1236 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1238 control = L2CAP_CTRL_POLL;
1239 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1240 bh_unlock_sock(sk);
1243 static void l2cap_drop_acked_frames(struct sock *sk)
1245 struct sk_buff *skb;
1247 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1248 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1249 break;
1251 skb = skb_dequeue(TX_QUEUE(sk));
1252 kfree_skb(skb);
1254 l2cap_pi(sk)->unacked_frames--;
1257 if (!l2cap_pi(sk)->unacked_frames)
1258 del_timer(&l2cap_pi(sk)->retrans_timer);
1260 return;
1263 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1265 struct l2cap_pinfo *pi = l2cap_pi(sk);
1266 int err;
1268 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1270 err = hci_send_acl(pi->conn->hcon, skb, 0);
1271 if (err < 0)
1272 kfree_skb(skb);
1274 return err;
1277 static int l2cap_streaming_send(struct sock *sk)
1279 struct sk_buff *skb, *tx_skb;
1280 struct l2cap_pinfo *pi = l2cap_pi(sk);
1281 u16 control, fcs;
1282 int err;
1284 while ((skb = sk->sk_send_head)) {
1285 tx_skb = skb_clone(skb, GFP_ATOMIC);
1287 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1288 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1289 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1291 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1292 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1293 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1296 err = l2cap_do_send(sk, tx_skb);
1297 if (err < 0) {
1298 l2cap_send_disconn_req(pi->conn, sk);
1299 return err;
1302 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1304 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1305 sk->sk_send_head = NULL;
1306 else
1307 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1309 skb = skb_dequeue(TX_QUEUE(sk));
1310 kfree_skb(skb);
1312 return 0;
/*
 * Retransmit the queued I-frame whose TxSeq equals 'tx_seq'.
 * Walks TX_QUEUE(sk) from the head; stops silently if no frame with
 * that sequence number is found.  Drops the channel when the frame has
 * already been sent remote_max_tx times, or when the resend fails.
 */
1315 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1317 struct l2cap_pinfo *pi = l2cap_pi(sk);
1318 struct sk_buff *skb, *tx_skb;
1319 u16 control, fcs;
1320 int err;
1322 skb = skb_peek(TX_QUEUE(sk));
1323 do {
1324 if (bt_cb(skb)->tx_seq != tx_seq) {
1325 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1326 break;
1327 skb = skb_queue_next(TX_QUEUE(sk), skb);
1328 continue;
/* Retry budget exhausted: give up and disconnect the channel. */
1331 if (pi->remote_max_tx &&
1332 bt_cb(skb)->retries == pi->remote_max_tx) {
1333 l2cap_send_disconn_req(pi->conn, sk);
1334 break;
/* NOTE(review): skb_clone(GFP_ATOMIC) may return NULL and is
 * dereferenced unchecked below — confirm and add a check. */
1337 tx_skb = skb_clone(skb, GFP_ATOMIC);
1338 bt_cb(skb)->retries++;
/* Rebuild the control word: current ReqSeq (piggybacked ack)
 * plus the original TxSeq of the frame being resent. */
1339 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1340 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1341 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1342 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1344 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1345 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1346 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1349 err = l2cap_do_send(sk, tx_skb);
1350 if (err < 0) {
1351 l2cap_send_disconn_req(pi->conn, sk);
1352 return err;
1354 break;
1355 } while(1);
1356 return 0;
1359 static int l2cap_ertm_send(struct sock *sk)
1361 struct sk_buff *skb, *tx_skb;
1362 struct l2cap_pinfo *pi = l2cap_pi(sk);
1363 u16 control, fcs;
1364 int err;
1366 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1367 return 0;
1369 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1370 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1371 tx_skb = skb_clone(skb, GFP_ATOMIC);
1373 if (pi->remote_max_tx &&
1374 bt_cb(skb)->retries == pi->remote_max_tx) {
1375 l2cap_send_disconn_req(pi->conn, sk);
1376 break;
1379 bt_cb(skb)->retries++;
1381 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1382 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1383 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1384 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1387 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1388 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1389 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1392 err = l2cap_do_send(sk, tx_skb);
1393 if (err < 0) {
1394 l2cap_send_disconn_req(pi->conn, sk);
1395 return err;
1397 __mod_retrans_timer();
1399 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1400 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1402 pi->unacked_frames++;
1404 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1405 sk->sk_send_head = NULL;
1406 else
1407 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1410 return 0;
/*
 * Copy 'len' bytes of user iovec data into 'skb': the first 'count'
 * bytes go into the main skb (whose headers the caller already built),
 * the remainder into newly allocated frag_list skbs of at most
 * conn->mtu bytes each.  Returns total bytes copied, or -EFAULT.
 */
1413 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1415 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1416 struct sk_buff **frag;
1417 int err, sent = 0;
1419 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1420 return -EFAULT;
1423 sent += count;
1424 len -= count;
1426 /* Continuation fragments (no L2CAP header) */
1427 frag = &skb_shinfo(skb)->frag_list;
1428 while (len) {
1429 count = min_t(unsigned int, conn->mtu, len);
/* NOTE(review): allocation failure is reported as -EFAULT rather than
 * -ENOMEM; on any failure here the partially-built frag chain is left
 * attached, so the caller's kfree_skb(skb) releases it. */
1431 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1432 if (!*frag)
1433 return -EFAULT;
1434 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1435 return -EFAULT;
1437 sent += count;
1438 len -= count;
1440 frag = &(*frag)->next;
1443 return sent;
/*
 * Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload from 'msg'.  Payload beyond
 * the first conn->mtu-sized chunk goes into frag_list skbs.
 * Returns the skb or an ERR_PTR (-ENOMEM / error from iovec copy).
 */
1446 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1448 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1449 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field */
1450 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1451 struct l2cap_hdr *lh;
1453 BT_DBG("sk %p len %d", sk, (int)len);
1455 count = min_t(unsigned int, (conn->mtu - hlen), len);
1456 skb = bt_skb_send_alloc(sk, count + hlen,
1457 msg->msg_flags & MSG_DONTWAIT, &err);
1458 if (!skb)
1459 return ERR_PTR(-ENOMEM);
1461 /* Create L2CAP header */
1462 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1463 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1464 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1465 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1467 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1468 if (unlikely(err < 0)) {
1469 kfree_skb(skb);
1470 return ERR_PTR(err);
1472 return skb;
/*
 * Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload from 'msg' (overflow into frag_list skbs).  Same contract as
 * l2cap_create_connless_pdu() but without the PSM field.
 * Returns the skb or an ERR_PTR.
 */
1475 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1477 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1478 struct sk_buff *skb;
1479 int err, count, hlen = L2CAP_HDR_SIZE;
1480 struct l2cap_hdr *lh;
1482 BT_DBG("sk %p len %d", sk, (int)len);
1484 count = min_t(unsigned int, (conn->mtu - hlen), len);
1485 skb = bt_skb_send_alloc(sk, count + hlen,
1486 msg->msg_flags & MSG_DONTWAIT, &err);
1487 if (!skb)
1488 return ERR_PTR(-ENOMEM);
1490 /* Create L2CAP header */
1491 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1492 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1493 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1495 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1496 if (unlikely(err < 0)) {
1497 kfree_skb(skb);
1498 return ERR_PTR(err);
1500 return skb;
/*
 * Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * word ('control'), optional 2-byte SDU length (only when 'sdulen' is
 * non-zero, i.e. for a SAR start fragment), the payload, and a 2-byte
 * FCS placeholder when CRC16 is negotiated (filled in at send time).
 * Returns the skb or an ERR_PTR.
 */
1503 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1505 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1506 struct sk_buff *skb;
/* base header: L2CAP header + control field */
1507 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1508 struct l2cap_hdr *lh;
1510 BT_DBG("sk %p len %d", sk, (int)len);
1512 if (sdulen)
1513 hlen += 2;
1515 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1516 hlen += 2;
1518 count = min_t(unsigned int, (conn->mtu - hlen), len);
1519 skb = bt_skb_send_alloc(sk, count + hlen,
1520 msg->msg_flags & MSG_DONTWAIT, &err);
1521 if (!skb)
1522 return ERR_PTR(-ENOMEM);
1524 /* Create L2CAP header */
1525 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1526 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1527 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1528 put_unaligned_le16(control, skb_put(skb, 2));
1529 if (sdulen)
1530 put_unaligned_le16(sdulen, skb_put(skb, 2));
1532 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1533 if (unlikely(err < 0)) {
1534 kfree_skb(skb);
1535 return ERR_PTR(err);
/* Reserve space for the FCS; real value is computed at transmit. */
1538 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1539 put_unaligned_le16(0, skb_put(skb, 2));
1541 bt_cb(skb)->retries = 0;
1542 return skb;
/*
 * Segment an SDU larger than max_pdu_size into a chain of I-frames:
 * one SDU_START frame (carrying the total SDU length), zero or more
 * SDU_CONTINUE frames, and a final SDU_END frame.  The frames are built
 * on a local queue and spliced onto TX_QUEUE(sk) only once all of them
 * were created, so a mid-way allocation failure leaves the TX queue
 * untouched.  Returns total bytes queued or a negative errno.
 */
1545 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1547 struct l2cap_pinfo *pi = l2cap_pi(sk);
1548 struct sk_buff *skb;
1549 struct sk_buff_head sar_queue;
1550 u16 control;
1551 size_t size = 0;
1553 __skb_queue_head_init(&sar_queue);
1554 control = L2CAP_SDU_START;
/* The start fragment carries the full SDU length ('len') as sdulen. */
1555 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1556 if (IS_ERR(skb))
1557 return PTR_ERR(skb);
1559 __skb_queue_tail(&sar_queue, skb);
1560 len -= pi->max_pdu_size;
1561 size +=pi->max_pdu_size;
1562 control = 0;
1564 while (len > 0) {
1565 size_t buflen;
1567 if (len > pi->max_pdu_size) {
1568 control |= L2CAP_SDU_CONTINUE;
1569 buflen = pi->max_pdu_size;
1570 } else {
1571 control |= L2CAP_SDU_END;
1572 buflen = len;
1575 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1576 if (IS_ERR(skb)) {
1577 skb_queue_purge(&sar_queue);
1578 return PTR_ERR(skb);
1581 __skb_queue_tail(&sar_queue, skb);
1582 len -= buflen;
1583 size += buflen;
1584 control = 0;
1586 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1587 if (sk->sk_send_head == NULL)
1588 sk->sk_send_head = sar_queue.next;
1590 return size;
1593 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1595 struct sock *sk = sock->sk;
1596 struct l2cap_pinfo *pi = l2cap_pi(sk);
1597 struct sk_buff *skb;
1598 u16 control;
1599 int err;
1601 BT_DBG("sock %p, sk %p", sock, sk);
1603 err = sock_error(sk);
1604 if (err)
1605 return err;
1607 if (msg->msg_flags & MSG_OOB)
1608 return -EOPNOTSUPP;
1610 /* Check outgoing MTU */
1611 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1612 len > pi->omtu)
1613 return -EINVAL;
1615 lock_sock(sk);
1617 if (sk->sk_state != BT_CONNECTED) {
1618 err = -ENOTCONN;
1619 goto done;
1622 /* Connectionless channel */
1623 if (sk->sk_type == SOCK_DGRAM) {
1624 skb = l2cap_create_connless_pdu(sk, msg, len);
1625 err = l2cap_do_send(sk, skb);
1626 goto done;
1629 switch (pi->mode) {
1630 case L2CAP_MODE_BASIC:
1631 /* Create a basic PDU */
1632 skb = l2cap_create_basic_pdu(sk, msg, len);
1633 if (IS_ERR(skb)) {
1634 err = PTR_ERR(skb);
1635 goto done;
1638 err = l2cap_do_send(sk, skb);
1639 if (!err)
1640 err = len;
1641 break;
1643 case L2CAP_MODE_ERTM:
1644 case L2CAP_MODE_STREAMING:
1645 /* Entire SDU fits into one PDU */
1646 if (len <= pi->max_pdu_size) {
1647 control = L2CAP_SDU_UNSEGMENTED;
1648 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1649 if (IS_ERR(skb)) {
1650 err = PTR_ERR(skb);
1651 goto done;
1653 __skb_queue_tail(TX_QUEUE(sk), skb);
1654 if (sk->sk_send_head == NULL)
1655 sk->sk_send_head = skb;
1656 } else {
1657 /* Segment SDU into multiples PDUs */
1658 err = l2cap_sar_segment_sdu(sk, msg, len);
1659 if (err < 0)
1660 goto done;
1663 if (pi->mode == L2CAP_MODE_STREAMING)
1664 err = l2cap_streaming_send(sk);
1665 else
1666 err = l2cap_ertm_send(sk);
1668 if (!err)
1669 err = len;
1670 break;
1672 default:
1673 BT_DBG("bad state %1.1x", pi->mode);
1674 err = -EINVAL;
1677 done:
1678 release_sock(sk);
1679 return err;
/*
 * recvmsg() entry point.  With deferred setup, the first read on a
 * BT_CONNECT2 socket completes the connection: the pending positive
 * connect response is sent and 0 is returned.  Otherwise reception is
 * delegated to the generic bt_sock_recvmsg().
 */
1682 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1684 struct sock *sk = sock->sk;
1686 lock_sock(sk);
1688 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1689 struct l2cap_conn_rsp rsp;
1691 sk->sk_state = BT_CONFIG;
1693 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1694 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1695 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1696 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1697 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1698 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1700 release_sock(sk);
1701 return 0;
1704 release_sock(sk);
1706 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (channel options)
 * and L2CAP_LM (link-mode / security flags).
 */
1709 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1711 struct sock *sk = sock->sk;
1712 struct l2cap_options opts;
1713 int len, err = 0;
1714 u32 opt;
1716 BT_DBG("sk %p", sk);
1718 lock_sock(sk);
1720 switch (optname) {
1721 case L2CAP_OPTIONS:
/* Pre-fill with current values so a short copy_from_user only
 * overrides the leading fields the caller supplied. */
1722 opts.imtu = l2cap_pi(sk)->imtu;
1723 opts.omtu = l2cap_pi(sk)->omtu;
1724 opts.flush_to = l2cap_pi(sk)->flush_to;
1725 opts.mode = l2cap_pi(sk)->mode;
1726 opts.fcs = l2cap_pi(sk)->fcs;
1728 len = min_t(unsigned int, sizeof(opts), optlen);
1729 if (copy_from_user((char *) &opts, optval, len)) {
1730 err = -EFAULT;
1731 break;
/* flush_to is read back above but intentionally not stored here. */
1734 l2cap_pi(sk)->imtu = opts.imtu;
1735 l2cap_pi(sk)->omtu = opts.omtu;
1736 l2cap_pi(sk)->mode = opts.mode;
1737 l2cap_pi(sk)->fcs = opts.fcs;
1738 break;
1740 case L2CAP_LM:
1741 if (get_user(opt, (u32 __user *) optval)) {
1742 err = -EFAULT;
1743 break;
/* Sequential overrides: SECURE > ENCRYPT > AUTH when several
 * legacy flags are set at once. */
1746 if (opt & L2CAP_LM_AUTH)
1747 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1748 if (opt & L2CAP_LM_ENCRYPT)
1749 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1750 if (opt & L2CAP_LM_SECURE)
1751 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1753 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1754 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1755 break;
1757 default:
1758 err = -ENOPROTOOPT;
1759 break;
1762 release_sock(sk);
1763 return err;
/*
 * setsockopt() entry point.  SOL_L2CAP options are routed to the legacy
 * handler; SOL_BLUETOOTH supports BT_SECURITY and BT_DEFER_SETUP.
 */
1766 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1768 struct sock *sk = sock->sk;
1769 struct bt_security sec;
1770 int len, err = 0;
1771 u32 opt;
1773 BT_DBG("sk %p", sk);
1775 if (level == SOL_L2CAP)
1776 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1778 if (level != SOL_BLUETOOTH)
1779 return -ENOPROTOOPT;
1781 lock_sock(sk);
1783 switch (optname) {
1784 case BT_SECURITY:
1785 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1786 err = -EINVAL;
1787 break;
1790 sec.level = BT_SECURITY_LOW;
1792 len = min_t(unsigned int, sizeof(sec), optlen);
1793 if (copy_from_user((char *) &sec, optval, len)) {
1794 err = -EFAULT;
1795 break;
1798 if (sec.level < BT_SECURITY_LOW ||
1799 sec.level > BT_SECURITY_HIGH) {
1800 err = -EINVAL;
1801 break;
1804 l2cap_pi(sk)->sec_level = sec.level;
1805 break;
1807 case BT_DEFER_SETUP:
/* Only meaningful before/while listening. */
1808 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1809 err = -EINVAL;
1810 break;
1813 if (get_user(opt, (u32 __user *) optval)) {
1814 err = -EFAULT;
1815 break;
1818 bt_sk(sk)->defer_setup = opt;
1819 break;
1821 default:
1822 err = -ENOPROTOOPT;
1823 break;
1826 release_sock(sk);
1827 return err;
/*
 * Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM
 * (sec_level mapped back to legacy flag bits) and L2CAP_CONNINFO.
 */
1830 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1832 struct sock *sk = sock->sk;
1833 struct l2cap_options opts;
1834 struct l2cap_conninfo cinfo;
1835 int len, err = 0;
1836 u32 opt;
1838 BT_DBG("sk %p", sk);
1840 if (get_user(len, optlen))
1841 return -EFAULT;
1843 lock_sock(sk);
1845 switch (optname) {
1846 case L2CAP_OPTIONS:
1847 opts.imtu = l2cap_pi(sk)->imtu;
1848 opts.omtu = l2cap_pi(sk)->omtu;
1849 opts.flush_to = l2cap_pi(sk)->flush_to;
1850 opts.mode = l2cap_pi(sk)->mode;
1851 opts.fcs = l2cap_pi(sk)->fcs;
1853 len = min_t(unsigned int, len, sizeof(opts));
1854 if (copy_to_user(optval, (char *) &opts, len))
1855 err = -EFAULT;
1857 break;
1859 case L2CAP_LM:
/* Translate the internal security level back into the
 * cumulative legacy AUTH/ENCRYPT/SECURE flag set. */
1860 switch (l2cap_pi(sk)->sec_level) {
1861 case BT_SECURITY_LOW:
1862 opt = L2CAP_LM_AUTH;
1863 break;
1864 case BT_SECURITY_MEDIUM:
1865 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1866 break;
1867 case BT_SECURITY_HIGH:
1868 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1869 L2CAP_LM_SECURE;
1870 break;
1871 default:
1872 opt = 0;
1873 break;
1876 if (l2cap_pi(sk)->role_switch)
1877 opt |= L2CAP_LM_MASTER;
1879 if (l2cap_pi(sk)->force_reliable)
1880 opt |= L2CAP_LM_RELIABLE;
1882 if (put_user(opt, (u32 __user *) optval))
1883 err = -EFAULT;
1884 break;
1886 case L2CAP_CONNINFO:
/* Valid while connected, or on a deferred-setup pending accept. */
1887 if (sk->sk_state != BT_CONNECTED &&
1888 !(sk->sk_state == BT_CONNECT2 &&
1889 bt_sk(sk)->defer_setup)) {
1890 err = -ENOTCONN;
1891 break;
1894 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1895 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1897 len = min_t(unsigned int, len, sizeof(cinfo));
1898 if (copy_to_user(optval, (char *) &cinfo, len))
1899 err = -EFAULT;
1901 break;
1903 default:
1904 err = -ENOPROTOOPT;
1905 break;
1908 release_sock(sk);
1909 return err;
/*
 * getsockopt() entry point.  SOL_L2CAP options go to the legacy
 * handler; SOL_BLUETOOTH supports BT_SECURITY and BT_DEFER_SETUP.
 */
1912 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1914 struct sock *sk = sock->sk;
1915 struct bt_security sec;
1916 int len, err = 0;
1918 BT_DBG("sk %p", sk);
1920 if (level == SOL_L2CAP)
1921 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1923 if (level != SOL_BLUETOOTH)
1924 return -ENOPROTOOPT;
1926 if (get_user(len, optlen))
1927 return -EFAULT;
1929 lock_sock(sk);
1931 switch (optname) {
1932 case BT_SECURITY:
1933 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1934 err = -EINVAL;
1935 break;
1938 sec.level = l2cap_pi(sk)->sec_level;
1940 len = min_t(unsigned int, len, sizeof(sec));
1941 if (copy_to_user(optval, (char *) &sec, len))
1942 err = -EFAULT;
1944 break;
1946 case BT_DEFER_SETUP:
1947 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1948 err = -EINVAL;
1949 break;
1952 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1953 err = -EFAULT;
1955 break;
1957 default:
1958 err = -ENOPROTOOPT;
1959 break;
1962 release_sock(sk);
1963 return err;
/*
 * shutdown() entry point.  Marks the socket fully shut down, cancels
 * its timer and starts the close sequence; with SO_LINGER set, waits
 * (up to sk_lingertime) for the channel to reach BT_CLOSED.
 * The 'how' argument is ignored — shutdown is always both directions.
 */
1966 static int l2cap_sock_shutdown(struct socket *sock, int how)
1968 struct sock *sk = sock->sk;
1969 int err = 0;
1971 BT_DBG("sock %p, sk %p", sock, sk);
1973 if (!sk)
1974 return 0;
1976 lock_sock(sk);
1977 if (!sk->sk_shutdown) {
1978 sk->sk_shutdown = SHUTDOWN_MASK;
1979 l2cap_sock_clear_timer(sk);
1980 __l2cap_sock_close(sk, 0);
1982 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1983 err = bt_sock_wait_state(sk, BT_CLOSED,
1984 sk->sk_lingertime);
1986 release_sock(sk);
1987 return err;
/*
 * release() entry point: shut the socket down, detach it from its
 * owner and free it via l2cap_sock_kill().
 */
1990 static int l2cap_sock_release(struct socket *sock)
1992 struct sock *sk = sock->sk;
1993 int err;
1995 BT_DBG("sock %p, sk %p", sock, sk);
1997 if (!sk)
1998 return 0;
2000 err = l2cap_sock_shutdown(sock, 2);
2002 sock_orphan(sk);
2003 l2cap_sock_kill(sk);
2004 return err;
/*
 * Called once channel configuration completes.  Clears the config
 * state and wakes whoever is waiting: the connecting thread for an
 * outgoing channel, or the listening parent for an incoming one.
 */
2007 static void l2cap_chan_ready(struct sock *sk)
2009 struct sock *parent = bt_sk(sk)->parent;
2011 BT_DBG("sk %p, parent %p", sk, parent);
2013 l2cap_pi(sk)->conf_state = 0;
2014 l2cap_sock_clear_timer(sk);
2016 if (!parent) {
2017 /* Outgoing channel.
2018 * Wake up socket sleeping on connect.
2020 sk->sk_state = BT_CONNECTED;
2021 sk->sk_state_change(sk);
2022 } else {
2023 /* Incoming channel.
2024 * Wake up socket sleeping on accept.
2026 parent->sk_data_ready(parent, 0);
2030 /* Copy frame to all raw sockets on that connection */
2031 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2033 struct l2cap_chan_list *l = &conn->chan_list;
2034 struct sk_buff *nskb;
2035 struct sock *sk;
2037 BT_DBG("conn %p", conn);
2039 read_lock(&l->lock);
2040 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2041 if (sk->sk_type != SOCK_RAW)
2042 continue;
2044 /* Don't send frame to the socket it came from */
2045 if (skb->sk == sk)
2046 continue;
/* Clone failures are tolerated: the raw tap is best-effort. */
2047 nskb = skb_clone(skb, GFP_ATOMIC);
2048 if (!nskb)
2049 continue;
/* sock_queue_rcv_skb() rejects when the rcvbuf is full; drop then. */
2051 if (sock_queue_rcv_skb(sk, nskb))
2052 kfree_skb(nskb);
2054 read_unlock(&l->lock);
2057 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header (CID =
 * L2CAP_CID_SIGNALING), command header (code/ident/dlen) and 'dlen'
 * bytes of payload from 'data'.  Payload beyond conn->mtu spills into
 * frag_list skbs.  Returns the skb, or NULL on allocation failure.
 */
2058 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2059 u8 code, u8 ident, u16 dlen, void *data)
2061 struct sk_buff *skb, **frag;
2062 struct l2cap_cmd_hdr *cmd;
2063 struct l2cap_hdr *lh;
2064 int len, count;
2066 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2067 conn, code, ident, dlen);
2069 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2070 count = min_t(unsigned int, conn->mtu, len);
2072 skb = bt_skb_alloc(count, GFP_ATOMIC);
2073 if (!skb)
2074 return NULL;
2076 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2077 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2078 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2080 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2081 cmd->code = code;
2082 cmd->ident = ident;
2083 cmd->len = cpu_to_le16(dlen);
2085 if (dlen) {
/* First chunk: whatever payload fits after the two headers. */
2086 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2087 memcpy(skb_put(skb, count), data, count);
2088 data += count;
2091 len -= skb->len;
2093 /* Continuation fragments (no L2CAP header) */
2094 frag = &skb_shinfo(skb)->frag_list;
2095 while (len) {
2096 count = min_t(unsigned int, conn->mtu, len);
2098 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2099 if (!*frag)
2100 goto fail;
2102 memcpy(skb_put(*frag, count), data, count);
2104 len -= count;
2105 data += count;
2107 frag = &(*frag)->next;
2110 return skb;
2112 fail:
/* Frees the partial frag chain along with the head skb. */
2113 kfree_skb(skb);
2114 return NULL;
/*
 * Decode one configuration option at *ptr, advancing *ptr past it.
 * Outputs the option type and length; 'val' receives the decoded 1/2/4
 * byte value, or (for other lengths) a pointer to the raw option data.
 * Returns the total encoded size consumed.
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer length here — callers bound the loop by
 * total 'len' only, so a crafted option can over-read; verify upstream
 * hardening applies.
 */
2117 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2119 struct l2cap_conf_opt *opt = *ptr;
2120 int len;
2122 len = L2CAP_CONF_OPT_SIZE + opt->len;
2123 *ptr += len;
2125 *type = opt->type;
2126 *olen = opt->len;
2128 switch (opt->len) {
2129 case 1:
2130 *val = *((u8 *) opt->val);
2131 break;
2133 case 2:
2134 *val = __le16_to_cpu(*((__le16 *) opt->val));
2135 break;
2137 case 4:
2138 *val = __le32_to_cpu(*((__le32 *) opt->val));
2139 break;
2141 default:
2142 *val = (unsigned long) opt->val;
2143 break;
2146 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2147 return len;
/*
 * Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  1/2/4-byte values are stored little-endian; any other
 * length treats 'val' as a pointer to raw bytes to copy.
 */
2150 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2152 struct l2cap_conf_opt *opt = *ptr;
2154 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2156 opt->type = type;
2157 opt->len = len;
2159 switch (len) {
2160 case 1:
2161 *((u8 *) opt->val) = val;
2162 break;
2164 case 2:
2165 *((__le16 *) opt->val) = cpu_to_le16(val);
2166 break;
2168 case 4:
2169 *((__le32 *) opt->val) = cpu_to_le32(val);
2170 break;
2172 default:
2173 memcpy(opt->val, (void *) val, len);
2174 break;
2177 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Reset ERTM per-channel state (sequence counters) and set up the
 * retransmission and monitor timers plus the SREJ queue.
 */
2180 static inline void l2cap_ertm_init(struct sock *sk)
2182 l2cap_pi(sk)->expected_ack_seq = 0;
2183 l2cap_pi(sk)->unacked_frames = 0;
2184 l2cap_pi(sk)->buffer_seq = 0;
2185 l2cap_pi(sk)->num_to_ack = 0;
2187 setup_timer(&l2cap_pi(sk)->retrans_timer,
2188 l2cap_retrans_timeout, (unsigned long) sk);
2189 setup_timer(&l2cap_pi(sk)->monitor_timer,
2190 l2cap_monitor_timeout, (unsigned long) sk);
2192 __skb_queue_head_init(SREJ_QUEUE(sk));
/*
 * Return non-zero when 'mode' (ERTM or streaming) is supported by both
 * the remote feature mask and our local one (which advertises ERTM and
 * streaming only when the enable_ertm module parameter is set).
 * Basic mode falls through to 0 — callers treat it separately.
 */
2195 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2197 u32 local_feat_mask = l2cap_feat_mask;
2198 if (enable_ertm)
2199 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2201 switch (mode) {
2202 case L2CAP_MODE_ERTM:
2203 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2204 case L2CAP_MODE_STREAMING:
2205 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2206 default:
2207 return 0x00;
/*
 * Pick the channel mode to use: keep the requested ERTM/streaming mode
 * when both sides support it, otherwise fall back to basic mode.
 */
2211 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2213 switch (mode) {
2214 case L2CAP_MODE_STREAMING:
2215 case L2CAP_MODE_ERTM:
2216 if (l2cap_mode_supported(mode, remote_feat_mask))
2217 return mode;
2218 /* fall through */
2219 default:
2220 return L2CAP_MODE_BASIC;
/*
 * Build the configuration request we send for this channel into
 * 'data'.  On the first request, validate/select the channel mode;
 * then emit the MTU option (basic mode) or an RFC option plus an
 * optional FCS option (ERTM/streaming).  Returns the encoded length.
 */
2224 static int l2cap_build_conf_req(struct sock *sk, void *data)
2226 struct l2cap_pinfo *pi = l2cap_pi(sk);
2227 struct l2cap_conf_req *req = data;
2228 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2229 void *ptr = req->data;
2231 BT_DBG("sk %p", sk);
/* Mode negotiation happens only on the very first config exchange. */
2233 if (pi->num_conf_req || pi->num_conf_rsp)
2234 goto done;
2236 switch (pi->mode) {
2237 case L2CAP_MODE_STREAMING:
2238 case L2CAP_MODE_ERTM:
2239 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2240 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2241 l2cap_send_disconn_req(pi->conn, sk);
2242 break;
2243 default:
2244 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2245 break;
2248 done:
2249 switch (pi->mode) {
2250 case L2CAP_MODE_BASIC:
2251 if (pi->imtu != L2CAP_DEFAULT_MTU)
2252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2253 break;
2255 case L2CAP_MODE_ERTM:
2256 rfc.mode = L2CAP_MODE_ERTM;
2257 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2258 rfc.max_transmit = max_transmit;
/* Timeouts in a request shall be 0; the responder sets them. */
2259 rfc.retrans_timeout = 0;
2260 rfc.monitor_timeout = 0;
2261 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2263 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2264 sizeof(rfc), (unsigned long) &rfc);
2266 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2267 break;
2269 if (pi->fcs == L2CAP_FCS_NONE ||
2270 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2271 pi->fcs = L2CAP_FCS_NONE;
2272 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2274 break;
2276 case L2CAP_MODE_STREAMING:
2277 rfc.mode = L2CAP_MODE_STREAMING;
2278 rfc.txwin_size = 0;
2279 rfc.max_transmit = 0;
2280 rfc.retrans_timeout = 0;
2281 rfc.monitor_timeout = 0;
2282 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2284 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2285 sizeof(rfc), (unsigned long) &rfc);
2287 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2288 break;
2290 if (pi->fcs == L2CAP_FCS_NONE ||
2291 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2292 pi->fcs = L2CAP_FCS_NONE;
2293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2295 break;
2298 /* FIXME: Need actual value of the flush timeout */
2299 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2300 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2302 req->dcid = cpu_to_le16(pi->dcid);
2303 req->flags = cpu_to_le16(0);
2305 return ptr - data;
/*
 * Parse the peer's configuration request (stored in pi->conf_req) and
 * build our response into 'data'.  Unknown non-hint options produce an
 * UNKNOWN result with the offending types listed; mode mismatches
 * produce UNACCEPT with our preferred RFC, or -ECONNREFUSED when the
 * negotiation cannot converge.  Returns the encoded response length.
 */
2308 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2310 struct l2cap_pinfo *pi = l2cap_pi(sk);
2311 struct l2cap_conf_rsp *rsp = data;
2312 void *ptr = rsp->data;
2313 void *req = pi->conf_req;
2314 int len = pi->conf_len;
2315 int type, hint, olen;
2316 unsigned long val;
2317 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2318 u16 mtu = L2CAP_DEFAULT_MTU;
2319 u16 result = L2CAP_CONF_SUCCESS;
2321 BT_DBG("sk %p", sk);
2323 while (len >= L2CAP_CONF_OPT_SIZE) {
2324 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2326 hint = type & L2CAP_CONF_HINT;
2327 type &= L2CAP_CONF_MASK;
2329 switch (type) {
2330 case L2CAP_CONF_MTU:
2331 mtu = val;
2332 break;
2334 case L2CAP_CONF_FLUSH_TO:
2335 pi->flush_to = val;
2336 break;
2338 case L2CAP_CONF_QOS:
2339 break;
2341 case L2CAP_CONF_RFC:
2342 if (olen == sizeof(rfc))
2343 memcpy(&rfc, (void *) val, olen);
2344 break;
2346 case L2CAP_CONF_FCS:
2347 if (val == L2CAP_FCS_NONE)
2348 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2350 break;
2352 default:
/* Hints may be silently ignored; real options may not. */
2353 if (hint)
2354 break;
2356 result = L2CAP_CONF_UNKNOWN;
2357 *((u8 *) ptr++) = type;
2358 break;
/* Mode selection only on the first config exchange. */
2362 if (pi->num_conf_rsp || pi->num_conf_req)
2363 goto done;
2365 switch (pi->mode) {
2366 case L2CAP_MODE_STREAMING:
2367 case L2CAP_MODE_ERTM:
2368 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2369 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2370 return -ECONNREFUSED;
2371 break;
2372 default:
2373 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2374 break;
2377 done:
2378 if (pi->mode != rfc.mode) {
2379 result = L2CAP_CONF_UNACCEPT;
2380 rfc.mode = pi->mode;
/* Second unacceptable response: give up on negotiation. */
2382 if (pi->num_conf_rsp == 1)
2383 return -ECONNREFUSED;
2385 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2386 sizeof(rfc), (unsigned long) &rfc);
2390 if (result == L2CAP_CONF_SUCCESS) {
2391 /* Configure output options and let the other side know
2392 * which ones we don't like. */
2394 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2395 result = L2CAP_CONF_UNACCEPT;
2396 else {
2397 pi->omtu = mtu;
2398 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2400 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2402 switch (rfc.mode) {
2403 case L2CAP_MODE_BASIC:
2404 pi->fcs = L2CAP_FCS_NONE;
2405 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2406 break;
2408 case L2CAP_MODE_ERTM:
2409 pi->remote_tx_win = rfc.txwin_size;
2410 pi->remote_max_tx = rfc.max_transmit;
2411 pi->max_pdu_size = rfc.max_pdu_size;
/* As responder, we supply the timeout values. */
2413 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2414 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2416 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2419 sizeof(rfc), (unsigned long) &rfc);
2421 break;
2423 case L2CAP_MODE_STREAMING:
2424 pi->remote_tx_win = rfc.txwin_size;
2425 pi->max_pdu_size = rfc.max_pdu_size;
2427 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2429 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2430 sizeof(rfc), (unsigned long) &rfc);
2432 break;
2434 default:
2435 result = L2CAP_CONF_UNACCEPT;
2437 memset(&rfc, 0, sizeof(rfc));
2438 rfc.mode = pi->mode;
2441 if (result == L2CAP_CONF_SUCCESS)
2442 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2444 rsp->scid = cpu_to_le16(pi->dcid);
2445 rsp->result = cpu_to_le16(result);
2446 rsp->flags = cpu_to_le16(0x0000);
2448 return ptr - data;
2451 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2453 struct l2cap_pinfo *pi = l2cap_pi(sk);
2454 struct l2cap_conf_req *req = data;
2455 void *ptr = req->data;
2456 int type, olen;
2457 unsigned long val;
2458 struct l2cap_conf_rfc rfc;
2460 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2462 while (len >= L2CAP_CONF_OPT_SIZE) {
2463 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2465 switch (type) {
2466 case L2CAP_CONF_MTU:
2467 if (val < L2CAP_DEFAULT_MIN_MTU) {
2468 *result = L2CAP_CONF_UNACCEPT;
2469 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2470 } else
2471 pi->omtu = val;
2472 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2473 break;
2475 case L2CAP_CONF_FLUSH_TO:
2476 pi->flush_to = val;
2477 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2478 2, pi->flush_to);
2479 break;
2481 case L2CAP_CONF_RFC:
2482 if (olen == sizeof(rfc))
2483 memcpy(&rfc, (void *)val, olen);
2485 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2486 rfc.mode != pi->mode)
2487 return -ECONNREFUSED;
2489 pi->mode = rfc.mode;
2490 pi->fcs = 0;
2492 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2493 sizeof(rfc), (unsigned long) &rfc);
2494 break;
2498 if (*result == L2CAP_CONF_SUCCESS) {
2499 switch (rfc.mode) {
2500 case L2CAP_MODE_ERTM:
2501 pi->remote_tx_win = rfc.txwin_size;
2502 pi->retrans_timeout = rfc.retrans_timeout;
2503 pi->monitor_timeout = rfc.monitor_timeout;
2504 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2505 break;
2506 case L2CAP_MODE_STREAMING:
2507 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2508 break;
2512 req->dcid = cpu_to_le16(pi->dcid);
2513 req->flags = cpu_to_le16(0x0000);
2515 return ptr - data;
/*
 * Build a minimal (option-free) configuration response with the given
 * result and flags.  Returns the encoded length.
 */
2518 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2520 struct l2cap_conf_rsp *rsp = data;
2521 void *ptr = rsp->data;
2523 BT_DBG("sk %p", sk);
2525 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2526 rsp->result = cpu_to_le16(result);
2527 rsp->flags = cpu_to_le16(flags);
2529 return ptr - data;
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * information request (matching ident), stop the info timer, mark the
 * feature-mask exchange done and resume connection setup.
 */
2532 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2534 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 == "command not understood"; other reasons are ignored. */
2536 if (rej->reason != 0x0000)
2537 return 0;
2539 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2540 cmd->ident == conn->info_ident) {
2541 del_timer(&conn->info_timer);
2543 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2544 conn->info_ident = 0;
2546 l2cap_conn_start(conn);
2549 return 0;
/*
 * Handle an incoming Connection Request: locate a listening socket for
 * the PSM, enforce link security (non-SDP), allocate and initialize a
 * child socket, add it to the channel list and answer with success,
 * pending, or an error result.  When the peer's feature mask is still
 * unknown, a pending response is sent and an Information Request is
 * issued first.
 *
 * Locking: l2cap_get_sock_by_psm() returns with the parent socket
 * bh-locked; it is released at the 'response' label.
 */
2552 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2554 struct l2cap_chan_list *list = &conn->chan_list;
2555 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2556 struct l2cap_conn_rsp rsp;
2557 struct sock *sk, *parent;
2558 int result, status = L2CAP_CS_NO_INFO;
2560 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2561 __le16 psm = req->psm;
2563 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2565 /* Check if we have socket listening on psm */
2566 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2567 if (!parent) {
2568 result = L2CAP_CR_BAD_PSM;
2569 goto sendresp;
2572 /* Check if the ACL is secure enough (if not SDP) */
2573 if (psm != cpu_to_le16(0x0001) &&
2574 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure disconnect reason */
2575 conn->disc_reason = 0x05;
2576 result = L2CAP_CR_SEC_BLOCK;
2577 goto response;
2580 result = L2CAP_CR_NO_MEM;
2582 /* Check for backlog size */
2583 if (sk_acceptq_is_full(parent)) {
2584 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2585 goto response;
2588 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2589 if (!sk)
2590 goto response;
2592 write_lock_bh(&list->lock);
2594 /* Check if we already have channel with that dcid */
2595 if (__l2cap_get_chan_by_dcid(list, scid)) {
2596 write_unlock_bh(&list->lock);
2597 sock_set_flag(sk, SOCK_ZAPPED);
2598 l2cap_sock_kill(sk);
2599 goto response;
2602 hci_conn_hold(conn->hcon);
2604 l2cap_sock_init(sk, parent);
2605 bacpy(&bt_sk(sk)->src, conn->src);
2606 bacpy(&bt_sk(sk)->dst, conn->dst);
2607 l2cap_pi(sk)->psm = psm;
2608 l2cap_pi(sk)->dcid = scid;
2610 __l2cap_chan_add(conn, sk, parent);
2611 dcid = l2cap_pi(sk)->scid;
2613 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2615 l2cap_pi(sk)->ident = cmd->ident;
2617 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2618 if (l2cap_check_security(sk)) {
2619 if (bt_sk(sk)->defer_setup) {
2620 sk->sk_state = BT_CONNECT2;
2621 result = L2CAP_CR_PEND;
2622 status = L2CAP_CS_AUTHOR_PEND;
2623 parent->sk_data_ready(parent, 0);
2624 } else {
2625 sk->sk_state = BT_CONFIG;
2626 result = L2CAP_CR_SUCCESS;
2627 status = L2CAP_CS_NO_INFO;
2629 } else {
2630 sk->sk_state = BT_CONNECT2;
2631 result = L2CAP_CR_PEND;
2632 status = L2CAP_CS_AUTHEN_PEND;
2634 } else {
2635 sk->sk_state = BT_CONNECT2;
2636 result = L2CAP_CR_PEND;
2637 status = L2CAP_CS_NO_INFO;
2640 write_unlock_bh(&list->lock);
2642 response:
2643 bh_unlock_sock(parent);
2645 sendresp:
2646 rsp.scid = cpu_to_le16(scid);
2647 rsp.dcid = cpu_to_le16(dcid);
2648 rsp.result = cpu_to_le16(result);
2649 rsp.status = cpu_to_le16(status);
2650 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: request it before configuration. */
2652 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2653 struct l2cap_info_req info;
2654 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2657 conn->info_ident = l2cap_get_ident(conn);
2659 mod_timer(&conn->info_timer, jiffies +
2660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2662 l2cap_send_cmd(conn, conn->info_ident,
2663 L2CAP_INFO_REQ, sizeof(info), &info);
2666 return 0;
/*
 * Handle an incoming Connection Response.  Locate our channel by scid
 * (or by the request ident while still pending), then either start the
 * configuration phase (success), keep waiting (pending), or tear the
 * channel down (any other result).
 *
 * Locking: the l2cap_get_chan_by_* helpers return with the socket
 * bh-locked; released before returning.
 */
2669 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2671 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2672 u16 scid, dcid, result, status;
2673 struct sock *sk;
2674 u8 req[128];
2676 scid = __le16_to_cpu(rsp->scid);
2677 dcid = __le16_to_cpu(rsp->dcid);
2678 result = __le16_to_cpu(rsp->result);
2679 status = __le16_to_cpu(rsp->status);
2681 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2683 if (scid) {
2684 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2685 if (!sk)
2686 return 0;
2687 } else {
2688 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2689 if (!sk)
2690 return 0;
2693 switch (result) {
2694 case L2CAP_CR_SUCCESS:
2695 sk->sk_state = BT_CONFIG;
2696 l2cap_pi(sk)->ident = 0;
2697 l2cap_pi(sk)->dcid = dcid;
2698 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2700 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2702 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2703 l2cap_build_conf_req(sk, req), req);
2704 l2cap_pi(sk)->num_conf_req++;
2705 break;
2707 case L2CAP_CR_PEND:
2708 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2709 break;
2711 default:
2712 l2cap_chan_del(sk, ECONNREFUSED);
2713 break;
2716 bh_unlock_sock(sk);
2717 return 0;
2720 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2722 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2723 u16 dcid, flags;
2724 u8 rsp[64];
2725 struct sock *sk;
2726 int len;
2728 dcid = __le16_to_cpu(req->dcid);
2729 flags = __le16_to_cpu(req->flags);
2731 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2733 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2734 if (!sk)
2735 return -ENOENT;
2737 if (sk->sk_state == BT_DISCONN)
2738 goto unlock;
2740 /* Reject if config buffer is too small. */
2741 len = cmd_len - sizeof(*req);
2742 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2743 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2744 l2cap_build_conf_rsp(sk, rsp,
2745 L2CAP_CONF_REJECT, flags), rsp);
2746 goto unlock;
2749 /* Store config. */
2750 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2751 l2cap_pi(sk)->conf_len += len;
2753 if (flags & 0x0001) {
2754 /* Incomplete config. Send empty response. */
2755 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2756 l2cap_build_conf_rsp(sk, rsp,
2757 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2758 goto unlock;
2761 /* Complete config. */
2762 len = l2cap_parse_conf_req(sk, rsp);
2763 if (len < 0) {
2764 l2cap_send_disconn_req(conn, sk);
2765 goto unlock;
2768 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2769 l2cap_pi(sk)->num_conf_rsp++;
2771 /* Reset config buffer. */
2772 l2cap_pi(sk)->conf_len = 0;
2774 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2775 goto unlock;
2777 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2778 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2779 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2780 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2782 sk->sk_state = BT_CONNECTED;
2784 l2cap_pi(sk)->next_tx_seq = 0;
2785 l2cap_pi(sk)->expected_tx_seq = 0;
2786 __skb_queue_head_init(TX_QUEUE(sk));
2787 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2788 l2cap_ertm_init(sk);
2790 l2cap_chan_ready(sk);
2791 goto unlock;
2794 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2795 u8 buf[64];
2796 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2797 l2cap_build_conf_req(sk, buf), buf);
2798 l2cap_pi(sk)->num_conf_req++;
2801 unlock:
2802 bh_unlock_sock(sk);
2803 return 0;
/* Handle an incoming L2CAP Configuration Response.
 *
 * On UNACCEPT we renegotiate (up to L2CAP_CONF_MAX_CONF_RSP attempts) by
 * parsing the peer's counter-proposal and sending a fresh request; any
 * other failure result disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	/* Returns with the socket bh-locked; released at "done". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			/* NOTE(review): cmd->len is __le16 but is used here
			 * without le16_to_cpu() — wrong on big-endian; the
			 * sig-channel loop's cmd_len would be the safe
			 * source.  Confirm and fix separately. */
			int len = cmd->len - sizeof(*rsp);
			char req[64];

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through: too many retries, give up and disconnect */

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk);
		goto done;
	}

	/* Continuation flag set: more response fragments to come. */
	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		/* Both directions configured: enable FCS unless both sides
		 * negotiated it off, then bring the channel up. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response, purges any pending ERTM
 * state, removes the channel and kills the (zapped) socket.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* CIDs in the response are from our point of view, i.e. swapped. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop retransmission state along with the channel. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Disconnection Response.
 *
 * The peer confirmed our disconnect: purge queues/timers and remove the
 * channel with no error reported to the socket (err = 0).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns with the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop retransmission state along with the channel. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channels queries; any other type gets
 * a NOTSUPP result.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte header + 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Only advertise ERTM/streaming/FCS when the module option
		 * enables them. */
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte header + 8-byte fixed-channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the information-gathering phase started at connection setup:
 * after the feature mask arrives we optionally query fixed channels, and
 * once done we kick off any channels waiting in l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* A response arrived before the info timeout fired. */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query them next. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Process all signalling PDUs contained in one frame on the L2CAP
 * signalling channel (CID 0x0001).
 *
 * A single frame may carry several commands back to back; each is
 * dispatched to its handler, and any handler error is answered with a
 * Command Reject.  The skb is consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* The claimed payload must fit in what is left, and ident 0
		 * is reserved/invalid. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same frame. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3118 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3120 u16 our_fcs, rcv_fcs;
3121 int hdr_size = L2CAP_HDR_SIZE + 2;
3123 if (pi->fcs == L2CAP_FCS_CRC16) {
3124 skb_trim(skb, skb->len - 2);
3125 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3126 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3128 if (our_fcs != rcv_fcs)
3129 return -EINVAL;
3131 return 0;
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq so l2cap_check_srej_gap() can drain it in
 * sequence order.
 */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequence number and SAR bits in the skb control block for
	 * later reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	/* Walk the queue and insert before the first frame with a larger
	 * tx_seq; fall through to a tail append otherwise. */
	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
/* Reassemble a (possibly segmented) SDU from an incoming I-frame and
 * queue the complete SDU on the socket receive queue.
 *
 * The SAR bits of @control select whether this frame is a whole SDU or
 * the start/continuation/end of a segmented one; partial data is
 * accumulated in pi->sdu.  @skb is always consumed.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* Unsegmented frame in the middle of a segmented SDU:
			 * abort the partial SDU.
			 * NOTE(review): pi->sdu is freed but the pointer is
			 * not cleared and L2CAP_CONN_SAR_SDU stays set —
			 * looks like a dangling-pointer hazard; confirm. */
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;	/* skb ownership passed to the socket */

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* New SDU started before the previous one finished. */
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload bytes of a start frame carry the total
		 * SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			/* Peer sent more than the advertised SDU length. */
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len == pi->sdu_len) {
			/* NOTE(review): skb_clone() may return NULL under
			 * memory pressure and is passed unchecked to
			 * sock_queue_rcv_skb(); verify that path. */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		kfree_skb(pi->sdu);
		err = 0;
		break;
	}

	kfree_skb(skb);
	return err;
}
3241 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3243 struct sk_buff *skb;
3244 u16 control = 0;
3246 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3247 if (bt_cb(skb)->tx_seq != tx_seq)
3248 break;
3250 skb = skb_dequeue(SREJ_QUEUE(sk));
3251 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3252 l2cap_sar_reassembly_sdu(sk, skb, control);
3253 l2cap_pi(sk)->buffer_seq_srej =
3254 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3255 tx_seq++;
/* The frame we selectively rejected as @tx_seq has arrived: drop its
 * entry from the SREJ list, re-sending SREJ S-frames for every entry
 * that precedes it (those are still outstanding) and rotating them to
 * the back of the list.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	/* _safe variant: entries are deleted (and re-added) while walking. */
	list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request: done. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: repeat the SREJ and move the entry to the
		 * tail so list order keeps matching request order. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3279 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3281 struct l2cap_pinfo *pi = l2cap_pi(sk);
3282 struct srej_list *new;
3283 u16 control;
3285 while (tx_seq != pi->expected_tx_seq) {
3286 control = L2CAP_SUPER_SELECT_REJECT;
3287 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3288 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3289 control |= L2CAP_CTRL_POLL;
3290 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3292 l2cap_send_sframe(pi, control);
3294 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3295 new->tx_seq = pi->expected_tx_seq++;
3296 list_add_tail(&new->list, SREJ_LIST(sk));
3298 pi->expected_tx_seq++;
/* Handle an incoming ERTM I-frame.
 *
 * In-sequence frames are acknowledged and passed to SDU reassembly;
 * out-of-sequence frames enter (or extend) selective-reject recovery,
 * being parked on the SREJ queue until the gap is filled.
 *
 * Returns 0 on success or a negative errno from reassembly.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* The piggy-backed ReqSeq acknowledges our transmitted frames. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Already in SREJ recovery. */
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: queue it, drain any
			 * now-contiguous run, and retire the request. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete: resume normal numbering. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* A later missing frame arrived: if it was already
			 * SREJ'd, re-request the still-older ones... */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* ...otherwise this opens a new gap to request. */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* In-sequence relative to the recovery window: park it for
		 * the ordered drain in l2cap_check_srej_gap(). */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F bit answers our earlier poll: either clears a pending
		 * REJ or triggers retransmission from the acked point. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Send an RR acknowledgement every NUM_TO_ACK frames. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
/* Handle an incoming ERTM S-frame (RR / REJ / SREJ / RNR).
 *
 * Updates acknowledgement state and drives retransmission according to
 * the supervisory function and the P/F bits.
 *
 * NOTE(review): no path in this function frees @skb, and the caller
 * jumps to "done" (not "drop") on success — the S-frame skb appears to
 * be leaked; confirm against l2cap_data_channel().
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);	/* ReqSeq field of the S-frame */

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polls us: answer with RR + F bit. */
			u16 control = L2CAP_CTRL_FINAL;
			control |= L2CAP_SUPER_RCV_READY |
					(pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
			l2cap_send_sframe(l2cap_pi(sk), control);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Answer to our poll: ack frames, then either clear
			 * the pending REJ or retransmit from the ack point. */
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

			if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
				break;

			/* Poll answered: stop the monitor, restart the
			 * retransmission timer if frames are still unacked. */
			pi->conn_state &= ~L2CAP_CONN_WAIT_F;
			del_timer(&pi->monitor_timer);

			if (pi->unacked_frames > 0)
				__mod_retrans_timer();
		} else {
			/* Plain RR acknowledgement. */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;

	case L2CAP_SUPER_REJECT:
		/* Peer rejects everything from ReqSeq on: go back and
		 * retransmit. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		/* Peer asks for one specific frame again. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_retransmit_frame(sk, tx_seq);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Skip retransmission if this SREJ was already acted
			 * on for the same sequence number. */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer is busy: stop retransmitting until it recovers. */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&l2cap_pi(sk)->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
		}
		break;
	}

	return 0;
}
/* Deliver a data frame received on a connection-oriented channel.
 *
 * Dispatches by channel mode: basic mode queues straight to the socket;
 * ERTM and streaming modes strip/validate the control field and FCS
 * first.  The skb is consumed on every path.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	/* Returns with the socket bh-locked; released at "done". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Strip the 2-byte control field; compute the payload
		 * length net of the SAR length word and FCS trailer. */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode carries I-frames only; S-frames and
		 * oversized PDUs are silently dropped. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: skip over losses. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a frame received on the connectionless channel (CID 0x0002)
 * to the socket bound to @psm, if any.  The skb is consumed.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	/* Returns with the socket bh-locked; released at "done". */
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
/* Demultiplex one complete L2CAP frame by channel ID.
 *
 * The frame length in the basic header must match the skb exactly;
 * otherwise the frame is discarded.  The skb is consumed by the called
 * channel handler (or freed here on a length mismatch).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless data starts with a 2-byte PSM. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3686 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr on @hdev?
 *
 * Scans listening L2CAP sockets; ones bound to this adapter's address
 * take precedence (lm1) over wildcard BDADDR_ANY listeners (lm2).
 * Returns the accumulated link-mode flags, 0 to ignore.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return 0;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Bound to this adapter specifically. */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener: used only if no exact match. */
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
3721 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3723 struct l2cap_conn *conn;
3725 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3727 if (hcon->type != ACL_LINK)
3728 return 0;
3730 if (!status) {
3731 conn = l2cap_conn_add(hcon, status);
3732 if (conn)
3733 l2cap_conn_ready(conn);
3734 } else
3735 l2cap_conn_del(hcon, bt_err(status));
3737 return 0;
3740 static int l2cap_disconn_ind(struct hci_conn *hcon)
3742 struct l2cap_conn *conn = hcon->l2cap_data;
3744 BT_DBG("hcon %p", hcon);
3746 if (hcon->type != ACL_LINK || !conn)
3747 return 0x13;
3749 return conn->disc_reason;
3752 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3754 BT_DBG("hcon %p reason %d", hcon, reason);
3756 if (hcon->type != ACL_LINK)
3757 return 0;
3759 l2cap_conn_del(hcon, bt_err(reason));
3761 return 0;
3764 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3766 if (sk->sk_type != SOCK_SEQPACKET)
3767 return;
3769 if (encrypt == 0x00) {
3770 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3771 l2cap_sock_clear_timer(sk);
3772 l2cap_sock_set_timer(sk, HZ * 5);
3773 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3774 __l2cap_sock_close(sk, ECONNREFUSED);
3775 } else {
3776 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3777 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed for @hcon.
 *
 * Walks every channel on the connection (under the chan_list read lock,
 * taking each socket's bh lock in turn) and advances connection setup or
 * tears channels down depending on @status.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on connect completion are handled
		 * elsewhere. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel waiting on security: now send the
			 * actual Connection Request (or fail fast). */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel deferred for security: answer the
			 * pending Connection Request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: ACL data received for @hcon.
 *
 * Reassembles L2CAP frames from ACL fragments.  ACL_START fragments
 * carry the L2CAP basic header from which the total frame length is
 * taken; continuation fragments are appended to conn->rx_skb until the
 * frame is complete and handed to l2cap_recv_frame().  The incoming
 * fragment skb is always consumed here (its data is copied).
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* A new frame started while one was still being
			 * reassembled: discard the partial frame. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the length field of the basic header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* Continuation without a pending frame. */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* The fragment was copied (or rejected); free it either way. */
	kfree_skb(skb);
	return 0;
}
/* sysfs "l2cap" class attribute: dump one line per L2CAP socket
 * (addresses, state, PSM, CIDs, MTUs, security level) into @buf.
 *
 * NOTE(review): sprintf into the fixed PAGE_SIZE sysfs buffer is
 * unbounded across many sockets — could overflow with enough entries;
 * confirm and consider seq_file.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return str - buf;
}
3964 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* proto_ops table for L2CAP sockets.  Entries L2CAP does not implement
 * itself fall back to the generic Bluetooth helpers (bt_sock_*) or the
 * sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Socket-creation hook registered via bt_sock_register() in l2cap_init();
 * routes socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP) to l2cap_sock_create(). */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* HCI-layer protocol descriptor, registered with hci_register_proto() in
 * l2cap_init(): connection/disconnection/security event callbacks plus the
 * ACL data receive path (l2cap_recv_acldata). */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4003 static int __init l2cap_init(void)
4005 int err;
4007 err = proto_register(&l2cap_proto, 0);
4008 if (err < 0)
4009 return err;
4011 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4012 if (err < 0) {
4013 BT_ERR("L2CAP socket registration failed");
4014 goto error;
4017 err = hci_register_proto(&l2cap_hci_proto);
4018 if (err < 0) {
4019 BT_ERR("L2CAP protocol registration failed");
4020 bt_sock_unregister(BTPROTO_L2CAP);
4021 goto error;
4024 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
4025 BT_ERR("Failed to create L2CAP info file");
4027 BT_INFO("L2CAP ver %s", VERSION);
4028 BT_INFO("L2CAP socket layer initialized");
4030 return 0;
4032 error:
4033 proto_unregister(&l2cap_proto);
4034 return err;
/* Module unload: tear everything down in reverse order of l2cap_init().
 * Unregistration failures are only logged; there is no way to recover
 * during module exit. */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/* Intentionally empty, exported symbol: modules that only use L2CAP
 * sockets (and no other symbol from this module) call l2cap_load() so
 * that referencing it pulls the L2CAP module in automatically. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable parameters (mode 0644: world-readable, root-writable). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Matches the "bt-proto-0" (BTPROTO_L2CAP) alias used for autoloading. */
MODULE_ALIAS("bt-proto-0");