davinci: mach/common.h: add missing includes
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob9753b690a8b356b9bd24e88efb45d8d6e2b5ca56
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* Socket timer expired: close the channel with an error code derived from
 * the connection phase the socket was stuck in. Runs in timer (BH) context. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		reason = ECONNREFUSED;
		break;
	case BT_CONNECT:
		/* An SDP-level probe timing out is a plain timeout */
		reason = (l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) ?
						ECONNREFUSED : ETIMEDOUT;
		break;
	default:
		reason = ETIMEDOUT;
		break;
	}

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
123 return s;
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
133 return s;
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
140 struct sock *s;
141 read_lock(&l->lock);
142 s = __l2cap_get_chan_by_scid(l, cid);
143 if (s)
144 bh_lock_sock(s);
145 read_unlock(&l->lock);
146 return s;
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
156 return s;
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 struct sock *s;
162 read_lock(&l->lock);
163 s = __l2cap_get_chan_by_ident(l, ident);
164 if (s)
165 bh_lock_sock(s);
166 read_unlock(&l->lock);
167 return s;
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
179 return 0;
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 sock_hold(sk);
186 if (l->head)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
191 l->head = sk;
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
199 if (sk == l->head)
200 l->head = next;
202 if (next)
203 l2cap_pi(next)->prev_c = prev;
204 if (prev)
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
208 __sock_put(sk);
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
230 } else {
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
239 if (parent)
240 bt_accept_enqueue(parent, sk);
243 /* Delete channel.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
254 if (conn) {
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
264 if (err)
265 sk->sk_err = err;
267 if (parent) {
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
270 } else
271 sk->sk_state_change(sk);
274 /* Service level security */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 __u8 auth_type;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
283 else
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
288 } else {
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
292 break;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
295 break;
296 default:
297 auth_type = HCI_AT_NO_BONDING;
298 break;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
303 auth_type);
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
308 u8 id;
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
319 conn->tx_ident = 1;
321 id = conn->tx_ident;
323 spin_unlock_bh(&conn->lock);
325 return id;
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
334 if (!skb)
335 return -ENOMEM;
337 return hci_send_acl(conn->hcon, skb, 0);
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
342 struct sk_buff *skb;
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
348 hlen += 2;
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 if (!skb)
357 return -ENOMEM;
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
364 if (pi->fcs == L2CAP_FCS_CRC16) {
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
369 return hci_send_acl(pi->conn->hcon, skb, 0);
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
376 else
377 control |= L2CAP_SUPER_RCV_READY;
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
381 return l2cap_send_sframe(pi, control);
384 static void l2cap_do_start(struct sock *sk)
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
390 return;
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
402 } else {
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
417 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
419 struct l2cap_disconn_req req;
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
427 /* ---- L2CAP connections ---- */
428 static void l2cap_conn_start(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
431 struct sock *sk;
433 BT_DBG("conn %p", conn);
435 read_lock(&l->lock);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 bh_lock_sock(sk);
440 if (sk->sk_type != SOCK_SEQPACKET) {
441 bh_unlock_sock(sk);
442 continue;
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
461 if (l2cap_check_security(sk)) {
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
466 parent->sk_data_ready(parent, 0);
468 } else {
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
473 } else {
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
482 bh_unlock_sock(sk);
485 read_unlock(&l->lock);
488 static void l2cap_conn_ready(struct l2cap_conn *conn)
490 struct l2cap_chan_list *l = &conn->chan_list;
491 struct sock *sk;
493 BT_DBG("conn %p", conn);
495 read_lock(&l->lock);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
498 bh_lock_sock(sk);
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
505 l2cap_do_start(sk);
507 bh_unlock_sock(sk);
510 read_unlock(&l->lock);
513 /* Notify sockets that we cannot guaranty reliability anymore */
514 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
516 struct l2cap_chan_list *l = &conn->chan_list;
517 struct sock *sk;
519 BT_DBG("conn %p", conn);
521 read_lock(&l->lock);
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
525 sk->sk_err = err;
528 read_unlock(&l->lock);
531 static void l2cap_info_timeout(unsigned long arg)
533 struct l2cap_conn *conn = (void *) arg;
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
538 l2cap_conn_start(conn);
541 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
543 struct l2cap_conn *conn = hcon->l2cap_data;
545 if (conn || status)
546 return conn;
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
549 if (!conn)
550 return NULL;
552 hcon->l2cap_data = conn;
553 conn->hcon = hcon;
555 BT_DBG("hcon %p conn %p", hcon, conn);
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
561 conn->feat_mask = 0;
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
569 conn->disc_reason = 0x13;
571 return conn;
574 static void l2cap_conn_del(struct hci_conn *hcon, int err)
576 struct l2cap_conn *conn = hcon->l2cap_data;
577 struct sock *sk;
579 if (!conn)
580 return;
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
584 kfree_skb(conn->rx_skb);
586 /* Kill channels */
587 while ((sk = conn->chan_list.head)) {
588 bh_lock_sock(sk);
589 l2cap_chan_del(sk, err);
590 bh_unlock_sock(sk);
591 l2cap_sock_kill(sk);
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
597 hcon->l2cap_data = NULL;
598 kfree(conn);
601 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
609 /* ---- Socket interface ---- */
610 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
612 struct sock *sk;
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
616 goto found;
617 sk = NULL;
618 found:
619 return sk;
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
625 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
632 continue;
634 if (l2cap_pi(sk)->psm == psm) {
635 /* Exact match. */
636 if (!bacmp(&bt_sk(sk)->src, src))
637 break;
639 /* Closest match */
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
641 sk1 = sk;
644 return node ? sk : sk1;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
649 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *s;
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
654 if (s)
655 bh_lock_sock(s);
656 read_unlock(&l2cap_sk_list.lock);
657 return s;
660 static void l2cap_sock_destruct(struct sock *sk)
662 BT_DBG("sk %p", sk);
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
668 static void l2cap_sock_cleanup_listen(struct sock *parent)
670 struct sock *sk;
672 BT_DBG("parent %p", parent);
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
685 static void l2cap_sock_kill(struct sock *sk)
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
688 return;
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
695 sock_put(sk);
698 static void __l2cap_sock_close(struct sock *sk, int reason)
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
702 switch (sk->sk_state) {
703 case BT_LISTEN:
704 l2cap_sock_cleanup_listen(sk);
705 break;
707 case BT_CONNECTED:
708 case BT_CONFIG:
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
715 } else
716 l2cap_chan_del(sk, reason);
717 break;
719 case BT_CONNECT2:
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
723 __u16 result;
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
727 else
728 result = L2CAP_CR_BAD_PSM;
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
736 } else
737 l2cap_chan_del(sk, reason);
738 break;
740 case BT_CONNECT:
741 case BT_DISCONN:
742 l2cap_chan_del(sk, reason);
743 break;
745 default:
746 sock_set_flag(sk, SOCK_ZAPPED);
747 break;
751 /* Must be called on unlocked socket. */
752 static void l2cap_sock_close(struct sock *sk)
754 l2cap_sock_clear_timer(sk);
755 lock_sock(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
757 release_sock(sk);
758 l2cap_sock_kill(sk);
761 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
765 BT_DBG("sk %p", sk);
767 if (parent) {
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
778 } else {
779 pi->imtu = L2CAP_DEFAULT_MTU;
780 pi->omtu = 0;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
784 pi->role_switch = 0;
785 pi->force_reliable = 0;
788 /* Default config options */
789 pi->conf_len = 0;
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
796 static struct proto l2cap_proto = {
797 .name = "L2CAP",
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
802 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
804 struct sock *sk;
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
807 if (!sk)
808 return NULL;
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
816 sock_reset_flag(sk, SOCK_ZAPPED);
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
823 bt_sock_link(&l2cap_sk_list, sk);
824 return sk;
827 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
828 int kern)
830 struct sock *sk;
832 BT_DBG("sock %p", sock);
834 sock->state = SS_UNCONNECTED;
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
841 return -EPERM;
843 sock->ops = &l2cap_sock_ops;
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
846 if (!sk)
847 return -ENOMEM;
849 l2cap_sock_init(sk, NULL);
850 return 0;
853 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
857 int len, err = 0;
859 BT_DBG("sk %p", sk);
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
862 return -EINVAL;
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
868 if (la.l2_cid)
869 return -EINVAL;
871 lock_sock(sk);
873 if (sk->sk_state != BT_OPEN) {
874 err = -EBADFD;
875 goto done;
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
880 err = -EACCES;
881 goto done;
884 write_lock_bh(&l2cap_sk_list.lock);
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
887 err = -EADDRINUSE;
888 } else {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
900 write_unlock_bh(&l2cap_sk_list.lock);
902 done:
903 release_sock(sk);
904 return err;
907 static int l2cap_do_connect(struct sock *sk)
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
914 __u8 auth_type;
915 int err;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
918 l2cap_pi(sk)->psm);
920 hdev = hci_get_route(dst, src);
921 if (!hdev)
922 return -EHOSTUNREACH;
924 hci_dev_lock_bh(hdev);
926 err = -ENOMEM;
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
932 break;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
935 break;
936 default:
937 auth_type = HCI_AT_NO_BONDING;
938 break;
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
943 else
944 auth_type = HCI_AT_NO_BONDING;
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
948 } else {
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
952 break;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
955 break;
956 default:
957 auth_type = HCI_AT_NO_BONDING;
958 break;
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
964 if (!hcon)
965 goto done;
967 conn = l2cap_conn_add(hcon, 0);
968 if (!conn) {
969 hci_conn_put(hcon);
970 goto done;
973 err = 0;
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
978 l2cap_chan_add(conn, sk, NULL);
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
987 } else
988 l2cap_do_start(sk);
991 done:
992 hci_dev_unlock_bh(hdev);
993 hci_dev_put(hdev);
994 return err;
997 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1001 int len, err = 0;
1003 BT_DBG("sk %p", sk);
1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1013 if (la.l2_cid)
1014 return -EINVAL;
1016 lock_sock(sk);
1018 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1019 err = -EINVAL;
1020 goto done;
1023 switch (l2cap_pi(sk)->mode) {
1024 case L2CAP_MODE_BASIC:
1025 break;
1026 case L2CAP_MODE_ERTM:
1027 case L2CAP_MODE_STREAMING:
1028 if (enable_ertm)
1029 break;
1030 /* fall through */
1031 default:
1032 err = -ENOTSUPP;
1033 goto done;
1036 switch (sk->sk_state) {
1037 case BT_CONNECT:
1038 case BT_CONNECT2:
1039 case BT_CONFIG:
1040 /* Already connecting */
1041 goto wait;
1043 case BT_CONNECTED:
1044 /* Already connected */
1045 goto done;
1047 case BT_OPEN:
1048 case BT_BOUND:
1049 /* Can connect */
1050 break;
1052 default:
1053 err = -EBADFD;
1054 goto done;
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1059 l2cap_pi(sk)->psm = la.l2_psm;
1061 err = l2cap_do_connect(sk);
1062 if (err)
1063 goto done;
1065 wait:
1066 err = bt_sock_wait_state(sk, BT_CONNECTED,
1067 sock_sndtimeo(sk, flags & O_NONBLOCK));
1068 done:
1069 release_sock(sk);
1070 return err;
1073 static int l2cap_sock_listen(struct socket *sock, int backlog)
1075 struct sock *sk = sock->sk;
1076 int err = 0;
1078 BT_DBG("sk %p backlog %d", sk, backlog);
1080 lock_sock(sk);
1082 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1083 err = -EBADFD;
1084 goto done;
1087 switch (l2cap_pi(sk)->mode) {
1088 case L2CAP_MODE_BASIC:
1089 break;
1090 case L2CAP_MODE_ERTM:
1091 case L2CAP_MODE_STREAMING:
1092 if (enable_ertm)
1093 break;
1094 /* fall through */
1095 default:
1096 err = -ENOTSUPP;
1097 goto done;
1100 if (!l2cap_pi(sk)->psm) {
1101 bdaddr_t *src = &bt_sk(sk)->src;
1102 u16 psm;
1104 err = -EINVAL;
1106 write_lock_bh(&l2cap_sk_list.lock);
1108 for (psm = 0x1001; psm < 0x1100; psm += 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1110 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1111 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1112 err = 0;
1113 break;
1116 write_unlock_bh(&l2cap_sk_list.lock);
1118 if (err < 0)
1119 goto done;
1122 sk->sk_max_ack_backlog = backlog;
1123 sk->sk_ack_backlog = 0;
1124 sk->sk_state = BT_LISTEN;
1126 done:
1127 release_sock(sk);
1128 return err;
1131 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1133 DECLARE_WAITQUEUE(wait, current);
1134 struct sock *sk = sock->sk, *nsk;
1135 long timeo;
1136 int err = 0;
1138 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1140 if (sk->sk_state != BT_LISTEN) {
1141 err = -EBADFD;
1142 goto done;
1145 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if (!timeo) {
1154 err = -EAGAIN;
1155 break;
1158 release_sock(sk);
1159 timeo = schedule_timeout(timeo);
1160 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1162 if (sk->sk_state != BT_LISTEN) {
1163 err = -EBADFD;
1164 break;
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1169 break;
1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk->sk_sleep, &wait);
1175 if (err)
1176 goto done;
1178 newsock->state = SS_CONNECTED;
1180 BT_DBG("new socket %p", nsk);
1182 done:
1183 release_sock(sk);
1184 return err;
1187 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1189 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1190 struct sock *sk = sock->sk;
1192 BT_DBG("sock %p, sk %p", sock, sk);
1194 addr->sa_family = AF_BLUETOOTH;
1195 *len = sizeof(struct sockaddr_l2);
1197 if (peer) {
1198 la->l2_psm = l2cap_pi(sk)->psm;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1201 } else {
1202 la->l2_psm = l2cap_pi(sk)->sport;
1203 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1204 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1207 return 0;
1210 static void l2cap_monitor_timeout(unsigned long arg)
1212 struct sock *sk = (void *) arg;
1213 u16 control;
1215 bh_lock_sock(sk);
1216 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1217 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1218 bh_unlock_sock(sk);
1219 return;
1222 l2cap_pi(sk)->retry_count++;
1223 __mod_monitor_timer();
1225 control = L2CAP_CTRL_POLL;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1227 bh_unlock_sock(sk);
1230 static void l2cap_retrans_timeout(unsigned long arg)
1232 struct sock *sk = (void *) arg;
1233 u16 control;
1235 bh_lock_sock(sk);
1236 l2cap_pi(sk)->retry_count = 1;
1237 __mod_monitor_timer();
1239 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1241 control = L2CAP_CTRL_POLL;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1243 bh_unlock_sock(sk);
1246 static void l2cap_drop_acked_frames(struct sock *sk)
1248 struct sk_buff *skb;
1250 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1251 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1252 break;
1254 skb = skb_dequeue(TX_QUEUE(sk));
1255 kfree_skb(skb);
1257 l2cap_pi(sk)->unacked_frames--;
1260 if (!l2cap_pi(sk)->unacked_frames)
1261 del_timer(&l2cap_pi(sk)->retrans_timer);
1263 return;
1266 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1268 struct l2cap_pinfo *pi = l2cap_pi(sk);
1269 int err;
1271 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1273 err = hci_send_acl(pi->conn->hcon, skb, 0);
1274 if (err < 0)
1275 kfree_skb(skb);
1277 return err;
1280 static int l2cap_streaming_send(struct sock *sk)
1282 struct sk_buff *skb, *tx_skb;
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1284 u16 control, fcs;
1285 int err;
1287 while ((skb = sk->sk_send_head)) {
1288 tx_skb = skb_clone(skb, GFP_ATOMIC);
1290 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1291 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1292 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1294 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1296 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1299 err = l2cap_do_send(sk, tx_skb);
1300 if (err < 0) {
1301 l2cap_send_disconn_req(pi->conn, sk);
1302 return err;
1305 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1307 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1308 sk->sk_send_head = NULL;
1309 else
1310 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1312 skb = skb_dequeue(TX_QUEUE(sk));
1313 kfree_skb(skb);
1315 return 0;
/* Retransmit the single queued I-frame whose TxSeq equals @tx_seq
 * (answering an SREJ).  Walks the TX queue looking for the matching
 * frame; disconnects if the frame has already been sent remote_max_tx
 * times, as required by ERTM. */
static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;
	int err;

	skb = skb_peek(TX_QUEUE(sk));
	do {
		/* Linear search for the requested sequence number. */
		if (bt_cb(skb)->tx_seq != tx_seq) {
			if (skb_queue_is_last(TX_QUEUE(sk), skb))
				break;
			skb = skb_queue_next(TX_QUEUE(sk), skb);
			continue;
		}

		/* MaxTransmit exceeded: the link is considered broken. */
		if (pi->remote_max_tx &&
				bt_cb(skb)->retries == pi->remote_max_tx) {
			l2cap_send_disconn_req(pi->conn, sk);
			break;
		}

		/* NOTE(review): skb_clone(GFP_ATOMIC) result is not checked
		 * for NULL before use — confirm against upstream. */
		tx_skb = skb_clone(skb, GFP_ATOMIC);
		bt_cb(skb)->retries++;

		/* Refresh ReqSeq (piggybacked ack) and TxSeq in the clone. */
		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
			/* Recompute FCS since the control field changed. */
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		err = l2cap_do_send(sk, tx_skb);
		if (err < 0) {
			l2cap_send_disconn_req(pi->conn, sk);
			return err;
		}
		break;
	} while(1);
	return 0;
}
1362 static int l2cap_ertm_send(struct sock *sk)
1364 struct sk_buff *skb, *tx_skb;
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1366 u16 control, fcs;
1367 int err;
1369 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1370 return 0;
1372 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1373 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1375 if (pi->remote_max_tx &&
1376 bt_cb(skb)->retries == pi->remote_max_tx) {
1377 l2cap_send_disconn_req(pi->conn, sk);
1378 break;
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1383 bt_cb(skb)->retries++;
1385 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1386 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1387 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1388 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1391 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1393 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1396 err = l2cap_do_send(sk, tx_skb);
1397 if (err < 0) {
1398 l2cap_send_disconn_req(pi->conn, sk);
1399 return err;
1401 __mod_retrans_timer();
1403 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1404 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1406 pi->unacked_frames++;
1408 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1409 sk->sk_send_head = NULL;
1410 else
1411 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1414 return 0;
/* Copy @len bytes of user iovec data into @skb: @count bytes into the
 * linear part (already sized by the caller), the rest into continuation
 * fragments chained on frag_list, each at most conn->mtu bytes.
 * Returns the number of bytes copied or -EFAULT. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		return -EFAULT;
	}

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		/* NOTE(review): allocation failure is reported as -EFAULT
		 * rather than the err from bt_skb_send_alloc() — confirm
		 * whether -ENOMEM propagation was intended.  Already-chained
		 * fragments are freed when the caller frees @skb. */
		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus a
 * 2-byte PSM prefix, followed by the user payload.  Returns the skb or
 * an ERR_PTR. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	/* hlen = basic header + 2 bytes for the PSM field. */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Linear part is capped by the ACL MTU; the rest goes into
	 * fragments via l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a Basic Mode PDU: plain L2CAP header followed by the user
 * payload (no control field, no FCS).  Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/Streaming I-frame: L2CAP header, 16-bit control field,
 * optional 2-byte SDU length (@sdulen != 0 for SAR start frames),
 * payload, and 2-byte FCS placeholder when CRC16 was negotiated (the
 * real FCS is stamped at transmit time).  Returns skb or ERR_PTR. */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	/* hlen = basic header + control field. */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (sdulen)
		hlen += 2;	/* SDU length field (SAR start) */

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder FCS; overwritten with the real CRC when sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than max_pdu_size into a START frame, zero or
 * more CONTINUE frames and a final END frame, building them on a local
 * queue first so a mid-stream allocation failure leaves TX_QUEUE
 * untouched.  On success splices the frames onto TX_QUEUE and returns
 * the total payload size queued. */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	__skb_queue_head_init(&sar_queue);
	/* First PDU carries the SAR=START bits plus the full SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->max_pdu_size;
	size += pi->max_pdu_size;
	control = 0;

	while (len > 0) {
		size_t buflen;

		if (len > pi->max_pdu_size) {
			control |= L2CAP_SDU_CONTINUE;
			buflen = pi->max_pdu_size;
		} else {
			control |= L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop the partially built SDU; nothing was queued yet. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
		control = 0;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
/* sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: connectionless PDU for SOCK_DGRAM, a single basic
 * PDU in Basic Mode, or (ERTM/Streaming) queueing of one I-frame or a
 * SAR-segmented SDU followed by a transmit pass.  Returns bytes sent
 * or a negative errno. */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
			len > pi->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb))
			err = PTR_ERR(skb);
		else
			err = l2cap_do_send(sk, skb);
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		err = l2cap_do_send(sk, skb);
		if (!err)
			err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->max_pdu_size) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);
			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;
		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		/* Kick the transmitter for whichever reliable mode is active. */
		if (pi->mode == L2CAP_MODE_STREAMING)
			err = l2cap_streaming_send(sk);
		else
			err = l2cap_ertm_send(sk);

		if (!err)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EINVAL;
	}

done:
	release_sock(sk);
	return err;
}
/* recvmsg() entry point.  For a deferred-setup channel still in
 * BT_CONNECT2, the first read acts as the user's accept: send the
 * pending Connect Response and move to BT_CONFIG.  Otherwise defer to
 * the generic Bluetooth socket receive path. */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		/* Answer the connect request that was held back by
		 * BT_DEFER_SETUP, using the ident saved at request time. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Current fields are pre-loaded into @opts so a short copy_from_user
 * leaves unspecified fields at their existing values. */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* NOTE(review): opts.flush_to is accepted from userspace but
		 * never written back to pi->flush_to here — confirm whether
		 * that is intentional in this kernel version. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map legacy link-mode bits onto the BT_SECURITY levels;
		 * the strongest requested bit wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here. */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* Security level only makes sense on connection-oriented or
		 * raw sockets. */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket starts accepting. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO). */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Translate the security level back into legacy link-mode
		 * bits (inverse of the setsockopt mapping). */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* Connection info is valid when connected, or while a
		 * deferred-setup connect is pending acceptance. */
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP. */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* shutdown() entry point.  Idempotent: only the first call closes the
 * channel.  With SO_LINGER set, waits (up to sk_lingertime) for the
 * channel to reach BT_CLOSED. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
/* release() entry point: full shutdown, then detach from the socket
 * layer and let l2cap_sock_kill() drop the last reference. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
/* Mark a channel as fully configured and wake whoever is waiting on it:
 * the connecting task for an outgoing channel, or the listening parent
 * for an incoming one. */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
2037 /* Copy frame to all raw sockets on that connection */
2038 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2040 struct l2cap_chan_list *l = &conn->chan_list;
2041 struct sk_buff *nskb;
2042 struct sock *sk;
2044 BT_DBG("conn %p", conn);
2046 read_lock(&l->lock);
2047 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2048 if (sk->sk_type != SOCK_RAW)
2049 continue;
2051 /* Don't send frame to the socket it came from */
2052 if (skb->sk == sk)
2053 continue;
2054 nskb = skb_clone(skb, GFP_ATOMIC);
2055 if (!nskb)
2056 continue;
2058 if (sock_queue_rcv_skb(sk, nskb))
2059 kfree_skb(nskb);
2061 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command skb: L2CAP header on the
 * signalling CID, command header (code/ident/len), then @dlen bytes of
 * payload, fragmented to the ACL MTU via frag_list when needed.
 * Returns NULL on allocation failure. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any fragments already chained on frag_list too. */
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *@ptr, advancing *@ptr past it.
 * Options of length 1/2/4 are decoded by value (little-endian on the
 * wire); any other length is returned as a pointer to the raw bytes in
 * *@val.  Returns the total number of bytes consumed. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	/* NOTE(review): 16/32-bit loads below go through plain casts on
	 * option data whose alignment follows the packet layout — assumed
	 * safe on the supported platforms; confirm for strict-alignment
	 * architectures. */
	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option to the buffer at *@ptr and advance
 * *@ptr.  For lengths other than 1/2/4, @val is interpreted as a
 * pointer to @len raw bytes (e.g. a struct l2cap_conf_rfc). */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Reset the per-channel ERTM state machine: sequence counters, the
 * retransmission and monitor timers, and the SREJ reassembly queue. */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_to_ack = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
}
2202 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2204 u32 local_feat_mask = l2cap_feat_mask;
2205 if (enable_ertm)
2206 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2208 switch (mode) {
2209 case L2CAP_MODE_ERTM:
2210 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2211 case L2CAP_MODE_STREAMING:
2212 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2213 default:
2214 return 0x00;
2218 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2220 switch (mode) {
2221 case L2CAP_MODE_STREAMING:
2222 case L2CAP_MODE_ERTM:
2223 if (l2cap_mode_supported(mode, remote_feat_mask))
2224 return mode;
2225 /* fall through */
2226 default:
2227 return L2CAP_MODE_BASIC;
/* Build our Configuration Request into @data: on the first request,
 * settle on a channel mode (honouring a user-forced ERTM/Streaming
 * mode), then emit the MTU, RFC and FCS options appropriate for the
 * chosen mode.  Returns the request length in bytes. */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens once, before any config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was forced by the user: it is non-negotiable. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU option is only needed when differing from the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit    = max_transmit;
		/* Timeouts are filled in by the responder per the spec. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we prefer it or the peer asked. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2315 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2317 struct l2cap_pinfo *pi = l2cap_pi(sk);
2318 struct l2cap_conf_rsp *rsp = data;
2319 void *ptr = rsp->data;
2320 void *req = pi->conf_req;
2321 int len = pi->conf_len;
2322 int type, hint, olen;
2323 unsigned long val;
2324 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2325 u16 mtu = L2CAP_DEFAULT_MTU;
2326 u16 result = L2CAP_CONF_SUCCESS;
2328 BT_DBG("sk %p", sk);
2330 while (len >= L2CAP_CONF_OPT_SIZE) {
2331 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2333 hint = type & L2CAP_CONF_HINT;
2334 type &= L2CAP_CONF_MASK;
2336 switch (type) {
2337 case L2CAP_CONF_MTU:
2338 mtu = val;
2339 break;
2341 case L2CAP_CONF_FLUSH_TO:
2342 pi->flush_to = val;
2343 break;
2345 case L2CAP_CONF_QOS:
2346 break;
2348 case L2CAP_CONF_RFC:
2349 if (olen == sizeof(rfc))
2350 memcpy(&rfc, (void *) val, olen);
2351 break;
2353 case L2CAP_CONF_FCS:
2354 if (val == L2CAP_FCS_NONE)
2355 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2357 break;
2359 default:
2360 if (hint)
2361 break;
2363 result = L2CAP_CONF_UNKNOWN;
2364 *((u8 *) ptr++) = type;
2365 break;
2369 if (pi->num_conf_rsp || pi->num_conf_req)
2370 goto done;
2372 switch (pi->mode) {
2373 case L2CAP_MODE_STREAMING:
2374 case L2CAP_MODE_ERTM:
2375 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2376 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2377 return -ECONNREFUSED;
2378 break;
2379 default:
2380 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2381 break;
2384 done:
2385 if (pi->mode != rfc.mode) {
2386 result = L2CAP_CONF_UNACCEPT;
2387 rfc.mode = pi->mode;
2389 if (pi->num_conf_rsp == 1)
2390 return -ECONNREFUSED;
2392 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2393 sizeof(rfc), (unsigned long) &rfc);
2397 if (result == L2CAP_CONF_SUCCESS) {
2398 /* Configure output options and let the other side know
2399 * which ones we don't like. */
2401 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2402 result = L2CAP_CONF_UNACCEPT;
2403 else {
2404 pi->omtu = mtu;
2405 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2409 switch (rfc.mode) {
2410 case L2CAP_MODE_BASIC:
2411 pi->fcs = L2CAP_FCS_NONE;
2412 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2413 break;
2415 case L2CAP_MODE_ERTM:
2416 pi->remote_tx_win = rfc.txwin_size;
2417 pi->remote_max_tx = rfc.max_transmit;
2418 pi->max_pdu_size = rfc.max_pdu_size;
2420 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2421 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2423 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2426 sizeof(rfc), (unsigned long) &rfc);
2428 break;
2430 case L2CAP_MODE_STREAMING:
2431 pi->remote_tx_win = rfc.txwin_size;
2432 pi->max_pdu_size = rfc.max_pdu_size;
2434 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2437 sizeof(rfc), (unsigned long) &rfc);
2439 break;
2441 default:
2442 result = L2CAP_CONF_UNACCEPT;
2444 memset(&rfc, 0, sizeof(rfc));
2445 rfc.mode = pi->mode;
2448 if (result == L2CAP_CONF_SUCCESS)
2449 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2451 rsp->scid = cpu_to_le16(pi->dcid);
2452 rsp->result = cpu_to_le16(result);
2453 rsp->flags = cpu_to_le16(0x0000);
2455 return ptr - data;
2458 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2460 struct l2cap_pinfo *pi = l2cap_pi(sk);
2461 struct l2cap_conf_req *req = data;
2462 void *ptr = req->data;
2463 int type, olen;
2464 unsigned long val;
2465 struct l2cap_conf_rfc rfc;
2467 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2469 while (len >= L2CAP_CONF_OPT_SIZE) {
2470 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2472 switch (type) {
2473 case L2CAP_CONF_MTU:
2474 if (val < L2CAP_DEFAULT_MIN_MTU) {
2475 *result = L2CAP_CONF_UNACCEPT;
2476 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2477 } else
2478 pi->omtu = val;
2479 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2480 break;
2482 case L2CAP_CONF_FLUSH_TO:
2483 pi->flush_to = val;
2484 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2485 2, pi->flush_to);
2486 break;
2488 case L2CAP_CONF_RFC:
2489 if (olen == sizeof(rfc))
2490 memcpy(&rfc, (void *)val, olen);
2492 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2493 rfc.mode != pi->mode)
2494 return -ECONNREFUSED;
2496 pi->mode = rfc.mode;
2497 pi->fcs = 0;
2499 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2500 sizeof(rfc), (unsigned long) &rfc);
2501 break;
2505 if (*result == L2CAP_CONF_SUCCESS) {
2506 switch (rfc.mode) {
2507 case L2CAP_MODE_ERTM:
2508 pi->remote_tx_win = rfc.txwin_size;
2509 pi->retrans_timeout = rfc.retrans_timeout;
2510 pi->monitor_timeout = rfc.monitor_timeout;
2511 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2512 break;
2513 case L2CAP_MODE_STREAMING:
2514 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2515 break;
2519 req->dcid = cpu_to_le16(pi->dcid);
2520 req->flags = cpu_to_le16(0x0000);
2522 return ptr - data;
/* Build a bare Configuration Response header (no options) with the
 * given @result and @flags.  Returns its length in bytes. */
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (feature mask probe), treat the probe as done and
 * let pending connections proceed. */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	/* Only "command not understood" (0x0000) is of interest here. */
	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security, allocate and register a child socket, and
 * answer with success/pending/refusal.  If our feature-mask probe has
 * not completed yet, the channel stays pending and an Information
 * Request is (re)issued. */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	/* NOTE(review): l2cap_get_sock_by_psm() appears to return with the
	 * parent bh-locked — the response: label unlocks it.  Confirm. */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Channel holds a reference on the ACL connection. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the ident so a deferred accept can answer later. */
	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides via recvmsg/accept. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Feature probe not done yet: (re)start it now. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
/* Handle an incoming Connection Response to our Connect Request.
 * Success moves the channel to BT_CONFIG and sends our first
 * Configuration Request; pending just records the state; anything else
 * tears the channel down. */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A pending response may not carry our scid yet; fall back to
	 * matching on the command ident.  The lookup helpers return the
	 * socket bh-locked (released via bh_unlock_sock below). */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP Configure Request.
 *
 * Options may arrive split across several requests (continuation flag,
 * bit 0 of 'flags'); they are accumulated in pi->conf_req until the
 * final fragment arrives, then parsed as a whole.  Once both our
 * outgoing options were accepted (CONF_OUTPUT_DONE) and we accepted
 * the peer's (CONF_INPUT_DONE), the channel becomes BT_CONNECTED.
 * The socket returned by the lookup is bh-locked; released at 'unlock'.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable option set: drop the connection */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: enable CRC16 FCS unless the
		 * peer explicitly negotiated it away */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own Configure Request yet */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2813 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2815 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2816 u16 scid, flags, result;
2817 struct sock *sk;
2819 scid = __le16_to_cpu(rsp->scid);
2820 flags = __le16_to_cpu(rsp->flags);
2821 result = __le16_to_cpu(rsp->result);
2823 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2824 scid, flags, result);
2826 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2827 if (!sk)
2828 return 0;
2830 switch (result) {
2831 case L2CAP_CONF_SUCCESS:
2832 break;
2834 case L2CAP_CONF_UNACCEPT:
2835 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2836 int len = cmd->len - sizeof(*rsp);
2837 char req[64];
2839 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2840 l2cap_send_disconn_req(conn, sk);
2841 goto done;
2844 /* throw out any old stored conf requests */
2845 result = L2CAP_CONF_SUCCESS;
2846 len = l2cap_parse_conf_rsp(sk, rsp->data,
2847 len, req, &result);
2848 if (len < 0) {
2849 l2cap_send_disconn_req(conn, sk);
2850 goto done;
2853 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2854 L2CAP_CONF_REQ, len, req);
2855 l2cap_pi(sk)->num_conf_req++;
2856 if (result != L2CAP_CONF_SUCCESS)
2857 goto done;
2858 break;
2861 default:
2862 sk->sk_state = BT_DISCONN;
2863 sk->sk_err = ECONNRESET;
2864 l2cap_sock_set_timer(sk, HZ * 5);
2865 l2cap_send_disconn_req(conn, sk);
2866 goto done;
2869 if (flags & 0x01)
2870 goto done;
2872 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2874 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2875 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2876 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2877 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2879 sk->sk_state = BT_CONNECTED;
2880 l2cap_pi(sk)->next_tx_seq = 0;
2881 l2cap_pi(sk)->expected_tx_seq = 0;
2882 __skb_queue_head_init(TX_QUEUE(sk));
2883 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2884 l2cap_ertm_init(sk);
2886 l2cap_chan_ready(sk);
2889 done:
2890 bh_unlock_sock(sk);
2891 return 0;
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response, shuts the socket down,
 * purges any pending ERTM queues/timers, unlinks the channel from the
 * connection and finally kills the (still zapped/orphaned) socket.
 * Note the CID swap in the response: our scid is the peer's dcid.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; returns the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop retransmission state along with the channel */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Disconnect Response.
 *
 * Completes a disconnect we initiated: purges pending queues and ERTM
 * timers, unlinks the channel (no error — this is a clean close) and
 * kills the socket.  Mirrors l2cap_disconnect_req() except for the
 * error code passed to l2cap_chan_del().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the socket bh-locked */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an incoming L2CAP Information Request.
 *
 * Answers FEAT_MASK with the locally supported feature bits (ERTM,
 * streaming and FCS are advertised only when the enable_ertm module
 * parameter is set), FIXED_CHAN with the fixed-channel bitmap, and
 * anything else with NOTSUPP.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the information exchange started at connection setup:
 * after receiving the feature mask, a follow-up FIXED_CHAN query is
 * issued if the peer advertises fixed channels; otherwise (and after
 * the fixed-channel reply) the exchange is marked done and queued
 * channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* Response arrived in time: stop the info-request timeout */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Process every signalling command packed into one C-frame on the
 * signalling channel (CID 0x0001).
 *
 * The raw frame is first mirrored to raw sockets, then each
 * {code, ident, len} command header is parsed and dispatched.  A
 * truncated command or a zero ident aborts parsing; a handler error
 * is answered with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			/* Payload would overrun the frame, or illegal ident 0 */
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
3130 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3132 u16 our_fcs, rcv_fcs;
3133 int hdr_size = L2CAP_HDR_SIZE + 2;
3135 if (pi->fcs == L2CAP_FCS_CRC16) {
3136 skb_trim(skb, skb->len - 2);
3137 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3138 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3140 if (our_fcs != rcv_fcs)
3141 return -EINVAL;
3143 return 0;
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.
 *
 * Walks the queue for the first frame with a larger tx_seq and inserts
 * before it; if none is found (or the queue is empty) the frame is
 * appended at the tail.  tx_seq and SAR bits are stashed in the skb
 * control block for later reassembly.
 * NOTE(review): the > comparison does not account for the mod-64
 * sequence wrap — ordering may be off across a wrap; confirm intent.
 */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
/* Reassemble an SDU from SAR-segmented I-frames and deliver it to the
 * socket receive queue.
 *
 * UNSEGMENTED frames are queued directly.  START allocates pi->sdu
 * sized from the 2-byte SDU-length prefix; CONTINUE/END append; END
 * clones the completed SDU and queues the clone.  A segment arriving
 * in the wrong SAR state drops the partial SDU and returns -EINVAL.
 * The input skb is always consumed.
 * NOTE(review): the skb_clone() result is passed to
 * sock_queue_rcv_skb() without a NULL check — under GFP_ATOMIC
 * pressure this looks like a NULL deref; confirm upstream fix.
 */
static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* Unexpected while a segmented SDU is in flight */
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;	/* queued: do not free below */

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		/* First two bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);	/* overflowed declared length */
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len == pi->sdu_len) {
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		kfree_skb(pi->sdu);
		err = 0;

		break;
	}

	kfree_skb(skb);
	return err;
}
/* Drain the SREJ queue starting at tx_seq: every consecutively numbered
 * frame now present is dequeued, handed to SAR reassembly with its
 * stored SAR bits, and counted against buffer_seq_srej (mod 64).  Stops
 * at the first gap still missing.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control = 0;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;	/* gap: frame still outstanding */

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_sar_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
/* Re-issue SREJ frames for every sequence number still pending before
 * tx_seq, then remove tx_seq itself from the pending SREJ list.
 *
 * Entries ahead of the match are re-sent and rotated to the list tail
 * (preserving overall order); the matching entry is deleted and freed,
 * ending the walk.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Requested frame arrived: drop its SREJ entry */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		/* Rotate to tail so relative order is kept */
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3291 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3293 struct l2cap_pinfo *pi = l2cap_pi(sk);
3294 struct srej_list *new;
3295 u16 control;
3297 while (tx_seq != pi->expected_tx_seq) {
3298 control = L2CAP_SUPER_SELECT_REJECT;
3299 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3300 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3301 control |= L2CAP_CTRL_POLL;
3302 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3304 l2cap_send_sframe(pi, control);
3306 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3307 new->tx_seq = pi->expected_tx_seq++;
3308 list_add_tail(&new->list, SREJ_LIST(sk));
3310 pi->expected_tx_seq++;
/* Process a received ERTM I-frame.
 *
 * Acknowledges frames up to the piggybacked req_seq, then handles the
 * three cases: the expected in-order frame (delivered via SAR
 * reassembly, occasionally acked with an RR), an out-of-order frame
 * while SREJ recovery is active (filling or extending the gap list),
 * or the first out-of-order frame (which starts SREJ recovery).
 * Returns 0, or a negative reassembly error.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* Piggybacked acknowledgement */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: close the gap */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Still recovering: buffer instead of delivering */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			/* Retransmit everything from the acked point */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Ack every L2CAP_DEFAULT_NUM_TO_ACK frames with an RR */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
/* Process a received ERTM S-frame (RR / REJ / SREJ / RNR).
 *
 * RR acknowledges frames and may poll (answered with a Final RR) or
 * carry the Final bit (completing a poll round and possibly triggering
 * retransmission).  REJ/SREJ request retransmission from/of a given
 * sequence number.  RNR marks the peer busy and stops the retransmit
 * timer.  Always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);	/* ReqSeq field of the S-frame */

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polls: answer immediately with Final RR */
			u16 control = L2CAP_CTRL_FINAL;
			control |= L2CAP_SUPER_RCV_READY |
				(pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
			l2cap_send_sframe(l2cap_pi(sk), control);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Answer to our poll */
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

			if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
				break;

			pi->conn_state &= ~L2CAP_CONN_WAIT_F;
			del_timer(&pi->monitor_timer);

			if (pi->unacked_frames > 0)
				__mod_retrans_timer();
		} else {
			/* Plain acknowledgement */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;

	case L2CAP_SUPER_REJECT:
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			/* Go back and retransmit from the rejected frame */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}

		break;

	case L2CAP_SUPER_SELECT_REJECT:
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_retransmit_frame(sk, tx_seq);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Skip retransmission if this SREJ was already acted on */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer is busy: hold off retransmissions */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&l2cap_pi(sk)->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
		}
		break;
	}

	return 0;
}
/* Deliver a data frame to the channel identified by cid.
 *
 * Basic mode frames go straight to the socket queue (dropped on MTU
 * overflow).  ERTM/streaming frames have their 2-byte control field
 * stripped, the payload length validated against the negotiated PDU
 * size, and the FCS checked before dispatch to the I/S-frame handlers
 * (ERTM) or direct SAR reassembly (streaming).  The lookup returns
 * the socket bh-locked; unlocked at 'done'.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* SDU-length prefix of a START segment is not payload */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			l2cap_data_channel_iframe(sk, control, skb);
		else
			l2cap_data_channel_sframe(sk, control, skb);

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* No S-frames in streaming mode */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming never retransmits: just resync on loss */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM on the source address.
 *
 * The frame is dropped unless a bound/connected socket exists and the
 * payload fits its incoming MTU.  The PSM lookup returns the socket
 * bh-locked; unlocked at 'done'.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3661 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3663 struct l2cap_hdr *lh = (void *) skb->data;
3664 u16 cid, len;
3665 __le16 psm;
3667 skb_pull(skb, L2CAP_HDR_SIZE);
3668 cid = __le16_to_cpu(lh->cid);
3669 len = __le16_to_cpu(lh->len);
3671 if (len != skb->len) {
3672 kfree_skb(skb);
3673 return;
3676 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3678 switch (cid) {
3679 case L2CAP_CID_SIGNALING:
3680 l2cap_sig_channel(conn, skb);
3681 break;
3683 case L2CAP_CID_CONN_LESS:
3684 psm = get_unaligned_le16(skb->data);
3685 skb_pull(skb, 2);
3686 l2cap_conless_channel(conn, psm, skb);
3687 break;
3689 default:
3690 l2cap_data_channel(conn, cid, skb);
3691 break;
3695 /* ---- L2CAP interface with lower layer (HCI) ---- */
3697 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3699 int exact = 0, lm1 = 0, lm2 = 0;
3700 register struct sock *sk;
3701 struct hlist_node *node;
3703 if (type != ACL_LINK)
3704 return 0;
3706 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3708 /* Find listening sockets and check their link_mode */
3709 read_lock(&l2cap_sk_list.lock);
3710 sk_for_each(sk, node, &l2cap_sk_list.head) {
3711 if (sk->sk_state != BT_LISTEN)
3712 continue;
3714 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3715 lm1 |= HCI_LM_ACCEPT;
3716 if (l2cap_pi(sk)->role_switch)
3717 lm1 |= HCI_LM_MASTER;
3718 exact++;
3719 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3720 lm2 |= HCI_LM_ACCEPT;
3721 if (l2cap_pi(sk)->role_switch)
3722 lm2 |= HCI_LM_MASTER;
3725 read_unlock(&l2cap_sk_list.lock);
3727 return exact ? lm1 : lm2;
3730 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3732 struct l2cap_conn *conn;
3734 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3736 if (hcon->type != ACL_LINK)
3737 return 0;
3739 if (!status) {
3740 conn = l2cap_conn_add(hcon, status);
3741 if (conn)
3742 l2cap_conn_ready(conn);
3743 } else
3744 l2cap_conn_del(hcon, bt_err(status));
3746 return 0;
3749 static int l2cap_disconn_ind(struct hci_conn *hcon)
3751 struct l2cap_conn *conn = hcon->l2cap_data;
3753 BT_DBG("hcon %p", hcon);
3755 if (hcon->type != ACL_LINK || !conn)
3756 return 0x13;
3758 return conn->disc_reason;
3761 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3763 BT_DBG("hcon %p reason %d", hcon, reason);
3765 if (hcon->type != ACL_LINK)
3766 return 0;
3768 l2cap_conn_del(hcon, bt_err(reason));
3770 return 0;
3773 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3775 if (sk->sk_type != SOCK_SEQPACKET)
3776 return;
3778 if (encrypt == 0x00) {
3779 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3780 l2cap_sock_clear_timer(sk);
3781 l2cap_sock_set_timer(sk, HZ * 5);
3782 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3783 __l2cap_sock_close(sk, ECONNREFUSED);
3784 } else {
3785 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3786 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished.
 *
 * Walks every channel on the connection: channels already connected or
 * configuring get an encryption re-check; a channel in BT_CONNECT now
 * sends its deferred Connect Request (or arms a short retry timer on
 * failure); a channel in BT_CONNECT2 answers the peer's pending
 * Connect Request with SUCCESS or SEC_BLOCK.  Channels still waiting
 * on their own security round (CONF_CONNECT_PEND) are skipped.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security passed: send the deferred request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI callback: reassemble ACL data fragments into complete L2CAP
 * frames.
 *
 * ACL_START fragments carry the L2CAP header, from which the total
 * frame length is derived; complete frames are dispatched immediately,
 * partial ones buffered in conn->rx_skb.  Continuation fragments are
 * appended until rx_len reaches zero.  Any inconsistency (unexpected
 * start, short header, oversized frame/fragment) discards the partial
 * state and marks the connection unreliable.  Always returns 0; the
 * input skb is consumed on every path.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* Previous frame never completed */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			/* Not even a complete length field */
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * prints one line per L2CAP socket — addresses, state, PSM, CIDs,
 * MTUs and security level — under the socket list read lock.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
/* debugfs open callback: bind the seq_file single-show handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the read-only debugfs "l2cap" file; read/seek/release
 * are the stock single-entry seq_file helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* dentry of the debugfs "l2cap" file; NULL when debugfs is unavailable
 * or creation failed (see l2cap_init / l2cap_exit). */
static struct dentry *l2cap_debugfs;
/* Socket-layer entry points for L2CAP sockets (PF_BLUETOOTH/BTPROTO_L2CAP).
 * Operations without an L2CAP-specific implementation fall back to the
 * generic Bluetooth (bt_sock_*) or stub (sock_no_*) helpers. */
static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = l2cap_sock_recvmsg,
	.poll = bt_sock_poll,
	.ioctl = bt_sock_ioctl,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt
};
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH, ...,
 * BTPROTO_L2CAP) is routed to l2cap_sock_create. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = l2cap_sock_create,
};
/* HCI protocol descriptor: hooks L2CAP into the HCI core so it receives
 * connection, disconnection, security and ACL-data events. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4025 static int __init l2cap_init(void)
4027 int err;
4029 err = proto_register(&l2cap_proto, 0);
4030 if (err < 0)
4031 return err;
4033 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4034 if (err < 0) {
4035 BT_ERR("L2CAP socket registration failed");
4036 goto error;
4039 err = hci_register_proto(&l2cap_hci_proto);
4040 if (err < 0) {
4041 BT_ERR("L2CAP protocol registration failed");
4042 bt_sock_unregister(BTPROTO_L2CAP);
4043 goto error;
4046 if (bt_debugfs) {
4047 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4048 bt_debugfs, NULL, &l2cap_debugfs_fops);
4049 if (!l2cap_debugfs)
4050 BT_ERR("Failed to create L2CAP debug file");
4053 BT_INFO("L2CAP ver %s", VERSION);
4054 BT_INFO("L2CAP socket layer initialized");
4056 return 0;
4058 error:
4059 proto_unregister(&l2cap_proto);
4060 return err;
4063 static void __exit l2cap_exit(void)
4065 debugfs_remove(l2cap_debugfs);
4067 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4068 BT_ERR("L2CAP socket unregistration failed");
4070 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4071 BT_ERR("L2CAP protocol unregistration failed");
4073 proto_unregister(&l2cap_proto);
/*
 * Intentionally empty. Modules that only create L2CAP sockets (and use
 * no other symbol from this module) call l2cap_load() purely so the
 * symbol dependency pulls this module in automatically.
 */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable module parameters (visible under /sys/module/l2cap/). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

/* Module metadata; the bt-proto alias enables autoload on socket(BTPROTO_L2CAP). */
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");