MFGPT: move clocksource menu
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob400efa26ddbab7474fdfc1d1992eb204b4104f20
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
/* Module version string reported by the driver. */
54 #define VERSION "2.14"
/* ERTM/streaming modes are compiled in but disabled by default here. */
56 static int enable_ertm = 0;
57 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Feature mask and fixed-channel bitmap — presumably reported in L2CAP
 * information responses; confirm against the info-rsp handler. */
59 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
60 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
/* Global list of every L2CAP socket, guarded by its rwlock. */
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined further down. */
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
75 /* ---- L2CAP timers ---- */
/* ---- L2CAP timers ---- */
/* Per-socket timer expiry: close the channel with a state-appropriate
 * error, then reap the socket if it is zapped and orphaned. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int err;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		err = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		err = ECONNREFUSED;
	else
		err = ETIMEDOUT;

	__l2cap_sock_close(sk, err);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
101 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
103 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
104 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
107 static void l2cap_sock_clear_timer(struct sock *sk)
109 BT_DBG("sock %p state %d", sk, sk->sk_state);
110 sk_stop_timer(sk, &sk->sk_timer);
113 /* ---- L2CAP channels ---- */
114 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 struct sock *s;
117 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
118 if (l2cap_pi(s)->dcid == cid)
119 break;
121 return s;
124 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->scid == cid)
129 break;
131 return s;
134 /* Find channel with given SCID.
135 * Returns locked socket */
136 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 struct sock *s;
139 read_lock(&l->lock);
140 s = __l2cap_get_chan_by_scid(l, cid);
141 if (s)
142 bh_lock_sock(s);
143 read_unlock(&l->lock);
144 return s;
147 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 struct sock *s;
150 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
151 if (l2cap_pi(s)->ident == ident)
152 break;
154 return s;
157 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 struct sock *s;
160 read_lock(&l->lock);
161 s = __l2cap_get_chan_by_ident(l, ident);
162 if (s)
163 bh_lock_sock(s);
164 read_unlock(&l->lock);
165 return s;
168 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
170 u16 cid = L2CAP_CID_DYN_START;
172 for (; cid < L2CAP_CID_DYN_END; cid++) {
173 if (!__l2cap_get_chan_by_scid(l, cid))
174 return cid;
177 return 0;
180 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
182 sock_hold(sk);
184 if (l->head)
185 l2cap_pi(l->head)->prev_c = sk;
187 l2cap_pi(sk)->next_c = l->head;
188 l2cap_pi(sk)->prev_c = NULL;
189 l->head = sk;
192 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
194 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
196 write_lock_bh(&l->lock);
197 if (sk == l->head)
198 l->head = next;
200 if (next)
201 l2cap_pi(next)->prev_c = prev;
202 if (prev)
203 l2cap_pi(prev)->next_c = next;
204 write_unlock_bh(&l->lock);
206 __sock_put(sk);
209 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
211 struct l2cap_chan_list *l = &conn->chan_list;
213 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
214 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
216 conn->disc_reason = 0x13;
218 l2cap_pi(sk)->conn = conn;
220 if (sk->sk_type == SOCK_SEQPACKET) {
221 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
223 } else if (sk->sk_type == SOCK_DGRAM) {
224 /* Connectionless socket */
225 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
227 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 } else {
229 /* Raw socket can send/recv signalling messages only */
230 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 __l2cap_chan_link(l, sk);
237 if (parent)
238 bt_accept_enqueue(parent, sk);
241 /* Delete channel.
242 * Must be called on the locked socket. */
243 static void l2cap_chan_del(struct sock *sk, int err)
245 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
246 struct sock *parent = bt_sk(sk)->parent;
248 l2cap_sock_clear_timer(sk);
250 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 if (conn) {
253 /* Unlink from channel list */
254 l2cap_chan_unlink(&conn->chan_list, sk);
255 l2cap_pi(sk)->conn = NULL;
256 hci_conn_put(conn->hcon);
259 sk->sk_state = BT_CLOSED;
260 sock_set_flag(sk, SOCK_ZAPPED);
262 if (err)
263 sk->sk_err = err;
265 if (parent) {
266 bt_accept_unlink(sk);
267 parent->sk_data_ready(parent, 0);
268 } else
269 sk->sk_state_change(sk);
272 /* Service level security */
273 static inline int l2cap_check_security(struct sock *sk)
275 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
276 __u8 auth_type;
278 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
279 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
280 auth_type = HCI_AT_NO_BONDING_MITM;
281 else
282 auth_type = HCI_AT_NO_BONDING;
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
285 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
286 } else {
287 switch (l2cap_pi(sk)->sec_level) {
288 case BT_SECURITY_HIGH:
289 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 break;
291 case BT_SECURITY_MEDIUM:
292 auth_type = HCI_AT_GENERAL_BONDING;
293 break;
294 default:
295 auth_type = HCI_AT_NO_BONDING;
296 break;
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
301 auth_type);
304 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 u8 id;
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
314 spin_lock_bh(&conn->lock);
316 if (++conn->tx_ident > 128)
317 conn->tx_ident = 1;
319 id = conn->tx_ident;
321 spin_unlock_bh(&conn->lock);
323 return id;
326 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
330 BT_DBG("code 0x%2.2x", code);
332 if (!skb)
333 return -ENOMEM;
335 return hci_send_acl(conn->hcon, skb, 0);
338 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct sk_buff *skb;
341 struct l2cap_hdr *lh;
342 struct l2cap_conn *conn = pi->conn;
343 int count, hlen = L2CAP_HDR_SIZE + 2;
345 if (pi->fcs == L2CAP_FCS_CRC16)
346 hlen += 2;
348 BT_DBG("pi %p, control 0x%2.2x", pi, control);
350 count = min_t(unsigned int, conn->mtu, hlen);
351 control |= L2CAP_CTRL_FRAME_TYPE;
353 skb = bt_skb_alloc(count, GFP_ATOMIC);
354 if (!skb)
355 return -ENOMEM;
357 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
358 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
359 lh->cid = cpu_to_le16(pi->dcid);
360 put_unaligned_le16(control, skb_put(skb, 2));
362 if (pi->fcs == L2CAP_FCS_CRC16) {
363 u16 fcs = crc16(0, (u8 *)lh, count - 2);
364 put_unaligned_le16(fcs, skb_put(skb, 2));
367 return hci_send_acl(pi->conn->hcon, skb, 0);
370 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
372 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
373 control |= L2CAP_SUPER_RCV_NOT_READY;
374 else
375 control |= L2CAP_SUPER_RCV_READY;
377 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
379 return l2cap_send_sframe(pi, control);
382 static void l2cap_do_start(struct sock *sk)
384 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
386 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
387 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
388 return;
390 if (l2cap_check_security(sk)) {
391 struct l2cap_conn_req req;
392 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
393 req.psm = l2cap_pi(sk)->psm;
395 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
397 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
398 L2CAP_CONN_REQ, sizeof(req), &req);
400 } else {
401 struct l2cap_info_req req;
402 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
405 conn->info_ident = l2cap_get_ident(conn);
407 mod_timer(&conn->info_timer, jiffies +
408 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
410 l2cap_send_cmd(conn, conn->info_ident,
411 L2CAP_INFO_REQ, sizeof(req), &req);
415 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
417 struct l2cap_disconn_req req;
419 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
420 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
421 l2cap_send_cmd(conn, l2cap_get_ident(conn),
422 L2CAP_DISCONN_REQ, sizeof(req), &req);
425 /* ---- L2CAP connections ---- */
426 static void l2cap_conn_start(struct l2cap_conn *conn)
428 struct l2cap_chan_list *l = &conn->chan_list;
429 struct sock *sk;
431 BT_DBG("conn %p", conn);
433 read_lock(&l->lock);
435 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
436 bh_lock_sock(sk);
438 if (sk->sk_type != SOCK_SEQPACKET) {
439 bh_unlock_sock(sk);
440 continue;
443 if (sk->sk_state == BT_CONNECT) {
444 if (l2cap_check_security(sk)) {
445 struct l2cap_conn_req req;
446 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
447 req.psm = l2cap_pi(sk)->psm;
449 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
451 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
452 L2CAP_CONN_REQ, sizeof(req), &req);
454 } else if (sk->sk_state == BT_CONNECT2) {
455 struct l2cap_conn_rsp rsp;
456 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
457 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
459 if (l2cap_check_security(sk)) {
460 if (bt_sk(sk)->defer_setup) {
461 struct sock *parent = bt_sk(sk)->parent;
462 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
463 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
464 parent->sk_data_ready(parent, 0);
466 } else {
467 sk->sk_state = BT_CONFIG;
468 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
469 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 } else {
472 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
473 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
480 bh_unlock_sock(sk);
483 read_unlock(&l->lock);
486 static void l2cap_conn_ready(struct l2cap_conn *conn)
488 struct l2cap_chan_list *l = &conn->chan_list;
489 struct sock *sk;
491 BT_DBG("conn %p", conn);
493 read_lock(&l->lock);
495 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
496 bh_lock_sock(sk);
498 if (sk->sk_type != SOCK_SEQPACKET) {
499 l2cap_sock_clear_timer(sk);
500 sk->sk_state = BT_CONNECTED;
501 sk->sk_state_change(sk);
502 } else if (sk->sk_state == BT_CONNECT)
503 l2cap_do_start(sk);
505 bh_unlock_sock(sk);
508 read_unlock(&l->lock);
511 /* Notify sockets that we cannot guaranty reliability anymore */
512 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
514 struct l2cap_chan_list *l = &conn->chan_list;
515 struct sock *sk;
517 BT_DBG("conn %p", conn);
519 read_lock(&l->lock);
521 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
522 if (l2cap_pi(sk)->force_reliable)
523 sk->sk_err = err;
526 read_unlock(&l->lock);
529 static void l2cap_info_timeout(unsigned long arg)
531 struct l2cap_conn *conn = (void *) arg;
533 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
534 conn->info_ident = 0;
536 l2cap_conn_start(conn);
539 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
541 struct l2cap_conn *conn = hcon->l2cap_data;
543 if (conn || status)
544 return conn;
546 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
547 if (!conn)
548 return NULL;
550 hcon->l2cap_data = conn;
551 conn->hcon = hcon;
553 BT_DBG("hcon %p conn %p", hcon, conn);
555 conn->mtu = hcon->hdev->acl_mtu;
556 conn->src = &hcon->hdev->bdaddr;
557 conn->dst = &hcon->dst;
559 conn->feat_mask = 0;
561 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock);
564 setup_timer(&conn->info_timer, l2cap_info_timeout,
565 (unsigned long) conn);
567 conn->disc_reason = 0x13;
569 return conn;
572 static void l2cap_conn_del(struct hci_conn *hcon, int err)
574 struct l2cap_conn *conn = hcon->l2cap_data;
575 struct sock *sk;
577 if (!conn)
578 return;
580 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
582 kfree_skb(conn->rx_skb);
584 /* Kill channels */
585 while ((sk = conn->chan_list.head)) {
586 bh_lock_sock(sk);
587 l2cap_chan_del(sk, err);
588 bh_unlock_sock(sk);
589 l2cap_sock_kill(sk);
592 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
593 del_timer_sync(&conn->info_timer);
595 hcon->l2cap_data = NULL;
596 kfree(conn);
599 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
601 struct l2cap_chan_list *l = &conn->chan_list;
602 write_lock_bh(&l->lock);
603 __l2cap_chan_add(conn, sk, parent);
604 write_unlock_bh(&l->lock);
607 /* ---- Socket interface ---- */
608 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
610 struct sock *sk;
611 struct hlist_node *node;
612 sk_for_each(sk, node, &l2cap_sk_list.head)
613 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
614 goto found;
615 sk = NULL;
616 found:
617 return sk;
620 /* Find socket with psm and source bdaddr.
621 * Returns closest match.
623 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
625 struct sock *sk = NULL, *sk1 = NULL;
626 struct hlist_node *node;
628 sk_for_each(sk, node, &l2cap_sk_list.head) {
629 if (state && sk->sk_state != state)
630 continue;
632 if (l2cap_pi(sk)->psm == psm) {
633 /* Exact match. */
634 if (!bacmp(&bt_sk(sk)->src, src))
635 break;
637 /* Closest match */
638 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
639 sk1 = sk;
642 return node ? sk : sk1;
645 /* Find socket with given address (psm, src).
646 * Returns locked socket */
647 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 struct sock *s;
650 read_lock(&l2cap_sk_list.lock);
651 s = __l2cap_get_sock_by_psm(state, psm, src);
652 if (s)
653 bh_lock_sock(s);
654 read_unlock(&l2cap_sk_list.lock);
655 return s;
658 static void l2cap_sock_destruct(struct sock *sk)
660 BT_DBG("sk %p", sk);
662 skb_queue_purge(&sk->sk_receive_queue);
663 skb_queue_purge(&sk->sk_write_queue);
666 static void l2cap_sock_cleanup_listen(struct sock *parent)
668 struct sock *sk;
670 BT_DBG("parent %p", parent);
672 /* Close not yet accepted channels */
673 while ((sk = bt_accept_dequeue(parent, NULL)))
674 l2cap_sock_close(sk);
676 parent->sk_state = BT_CLOSED;
677 sock_set_flag(parent, SOCK_ZAPPED);
680 /* Kill socket (only if zapped and orphan)
681 * Must be called on unlocked socket.
683 static void l2cap_sock_kill(struct sock *sk)
685 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
686 return;
688 BT_DBG("sk %p state %d", sk, sk->sk_state);
690 /* Kill poor orphan */
691 bt_sock_unlink(&l2cap_sk_list, sk);
692 sock_set_flag(sk, SOCK_DEAD);
693 sock_put(sk);
696 static void __l2cap_sock_close(struct sock *sk, int reason)
698 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
700 switch (sk->sk_state) {
701 case BT_LISTEN:
702 l2cap_sock_cleanup_listen(sk);
703 break;
705 case BT_CONNECTED:
706 case BT_CONFIG:
707 if (sk->sk_type == SOCK_SEQPACKET) {
708 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
710 sk->sk_state = BT_DISCONN;
711 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
712 l2cap_send_disconn_req(conn, sk);
713 } else
714 l2cap_chan_del(sk, reason);
715 break;
717 case BT_CONNECT2:
718 if (sk->sk_type == SOCK_SEQPACKET) {
719 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
720 struct l2cap_conn_rsp rsp;
721 __u16 result;
723 if (bt_sk(sk)->defer_setup)
724 result = L2CAP_CR_SEC_BLOCK;
725 else
726 result = L2CAP_CR_BAD_PSM;
728 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
729 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
730 rsp.result = cpu_to_le16(result);
731 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
732 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
733 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
734 } else
735 l2cap_chan_del(sk, reason);
736 break;
738 case BT_CONNECT:
739 case BT_DISCONN:
740 l2cap_chan_del(sk, reason);
741 break;
743 default:
744 sock_set_flag(sk, SOCK_ZAPPED);
745 break;
749 /* Must be called on unlocked socket. */
750 static void l2cap_sock_close(struct sock *sk)
752 l2cap_sock_clear_timer(sk);
753 lock_sock(sk);
754 __l2cap_sock_close(sk, ECONNRESET);
755 release_sock(sk);
756 l2cap_sock_kill(sk);
759 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
761 struct l2cap_pinfo *pi = l2cap_pi(sk);
763 BT_DBG("sk %p", sk);
765 if (parent) {
766 sk->sk_type = parent->sk_type;
767 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
769 pi->imtu = l2cap_pi(parent)->imtu;
770 pi->omtu = l2cap_pi(parent)->omtu;
771 pi->mode = l2cap_pi(parent)->mode;
772 pi->fcs = l2cap_pi(parent)->fcs;
773 pi->sec_level = l2cap_pi(parent)->sec_level;
774 pi->role_switch = l2cap_pi(parent)->role_switch;
775 pi->force_reliable = l2cap_pi(parent)->force_reliable;
776 } else {
777 pi->imtu = L2CAP_DEFAULT_MTU;
778 pi->omtu = 0;
779 pi->mode = L2CAP_MODE_BASIC;
780 pi->fcs = L2CAP_FCS_CRC16;
781 pi->sec_level = BT_SECURITY_LOW;
782 pi->role_switch = 0;
783 pi->force_reliable = 0;
786 /* Default config options */
787 pi->conf_len = 0;
788 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
789 skb_queue_head_init(TX_QUEUE(sk));
790 skb_queue_head_init(SREJ_QUEUE(sk));
791 INIT_LIST_HEAD(SREJ_LIST(sk));
794 static struct proto l2cap_proto = {
795 .name = "L2CAP",
796 .owner = THIS_MODULE,
797 .obj_size = sizeof(struct l2cap_pinfo)
800 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
802 struct sock *sk;
804 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
805 if (!sk)
806 return NULL;
808 sock_init_data(sock, sk);
809 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
811 sk->sk_destruct = l2cap_sock_destruct;
812 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
814 sock_reset_flag(sk, SOCK_ZAPPED);
816 sk->sk_protocol = proto;
817 sk->sk_state = BT_OPEN;
819 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
821 bt_sock_link(&l2cap_sk_list, sk);
822 return sk;
825 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
826 int kern)
828 struct sock *sk;
830 BT_DBG("sock %p", sock);
832 sock->state = SS_UNCONNECTED;
834 if (sock->type != SOCK_SEQPACKET &&
835 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
836 return -ESOCKTNOSUPPORT;
838 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
839 return -EPERM;
841 sock->ops = &l2cap_sock_ops;
843 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
844 if (!sk)
845 return -ENOMEM;
847 l2cap_sock_init(sk, NULL);
848 return 0;
851 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
853 struct sock *sk = sock->sk;
854 struct sockaddr_l2 la;
855 int len, err = 0;
857 BT_DBG("sk %p", sk);
859 if (!addr || addr->sa_family != AF_BLUETOOTH)
860 return -EINVAL;
862 memset(&la, 0, sizeof(la));
863 len = min_t(unsigned int, sizeof(la), alen);
864 memcpy(&la, addr, len);
866 if (la.l2_cid)
867 return -EINVAL;
869 lock_sock(sk);
871 if (sk->sk_state != BT_OPEN) {
872 err = -EBADFD;
873 goto done;
876 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
877 !capable(CAP_NET_BIND_SERVICE)) {
878 err = -EACCES;
879 goto done;
882 write_lock_bh(&l2cap_sk_list.lock);
884 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
885 err = -EADDRINUSE;
886 } else {
887 /* Save source address */
888 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
889 l2cap_pi(sk)->psm = la.l2_psm;
890 l2cap_pi(sk)->sport = la.l2_psm;
891 sk->sk_state = BT_BOUND;
893 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
894 __le16_to_cpu(la.l2_psm) == 0x0003)
895 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
898 write_unlock_bh(&l2cap_sk_list.lock);
900 done:
901 release_sock(sk);
902 return err;
905 static int l2cap_do_connect(struct sock *sk)
907 bdaddr_t *src = &bt_sk(sk)->src;
908 bdaddr_t *dst = &bt_sk(sk)->dst;
909 struct l2cap_conn *conn;
910 struct hci_conn *hcon;
911 struct hci_dev *hdev;
912 __u8 auth_type;
913 int err;
915 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
916 l2cap_pi(sk)->psm);
918 hdev = hci_get_route(dst, src);
919 if (!hdev)
920 return -EHOSTUNREACH;
922 hci_dev_lock_bh(hdev);
924 err = -ENOMEM;
926 if (sk->sk_type == SOCK_RAW) {
927 switch (l2cap_pi(sk)->sec_level) {
928 case BT_SECURITY_HIGH:
929 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
930 break;
931 case BT_SECURITY_MEDIUM:
932 auth_type = HCI_AT_DEDICATED_BONDING;
933 break;
934 default:
935 auth_type = HCI_AT_NO_BONDING;
936 break;
938 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
939 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
940 auth_type = HCI_AT_NO_BONDING_MITM;
941 else
942 auth_type = HCI_AT_NO_BONDING;
944 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
945 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
946 } else {
947 switch (l2cap_pi(sk)->sec_level) {
948 case BT_SECURITY_HIGH:
949 auth_type = HCI_AT_GENERAL_BONDING_MITM;
950 break;
951 case BT_SECURITY_MEDIUM:
952 auth_type = HCI_AT_GENERAL_BONDING;
953 break;
954 default:
955 auth_type = HCI_AT_NO_BONDING;
956 break;
960 hcon = hci_connect(hdev, ACL_LINK, dst,
961 l2cap_pi(sk)->sec_level, auth_type);
962 if (!hcon)
963 goto done;
965 conn = l2cap_conn_add(hcon, 0);
966 if (!conn) {
967 hci_conn_put(hcon);
968 goto done;
971 err = 0;
973 /* Update source addr of the socket */
974 bacpy(src, conn->src);
976 l2cap_chan_add(conn, sk, NULL);
978 sk->sk_state = BT_CONNECT;
979 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
981 if (hcon->state == BT_CONNECTED) {
982 if (sk->sk_type != SOCK_SEQPACKET) {
983 l2cap_sock_clear_timer(sk);
984 sk->sk_state = BT_CONNECTED;
985 } else
986 l2cap_do_start(sk);
989 done:
990 hci_dev_unlock_bh(hdev);
991 hci_dev_put(hdev);
992 return err;
995 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
997 struct sock *sk = sock->sk;
998 struct sockaddr_l2 la;
999 int len, err = 0;
1001 BT_DBG("sk %p", sk);
1003 if (!addr || addr->sa_family != AF_BLUETOOTH)
1004 return -EINVAL;
1006 memset(&la, 0, sizeof(la));
1007 len = min_t(unsigned int, sizeof(la), alen);
1008 memcpy(&la, addr, len);
1010 if (la.l2_cid)
1011 return -EINVAL;
1013 lock_sock(sk);
1015 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1016 err = -EINVAL;
1017 goto done;
1020 switch (l2cap_pi(sk)->mode) {
1021 case L2CAP_MODE_BASIC:
1022 break;
1023 case L2CAP_MODE_ERTM:
1024 case L2CAP_MODE_STREAMING:
1025 if (enable_ertm)
1026 break;
1027 /* fall through */
1028 default:
1029 err = -ENOTSUPP;
1030 goto done;
1033 switch (sk->sk_state) {
1034 case BT_CONNECT:
1035 case BT_CONNECT2:
1036 case BT_CONFIG:
1037 /* Already connecting */
1038 goto wait;
1040 case BT_CONNECTED:
1041 /* Already connected */
1042 goto done;
1044 case BT_OPEN:
1045 case BT_BOUND:
1046 /* Can connect */
1047 break;
1049 default:
1050 err = -EBADFD;
1051 goto done;
1054 /* Set destination address and psm */
1055 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1056 l2cap_pi(sk)->psm = la.l2_psm;
1058 err = l2cap_do_connect(sk);
1059 if (err)
1060 goto done;
1062 wait:
1063 err = bt_sock_wait_state(sk, BT_CONNECTED,
1064 sock_sndtimeo(sk, flags & O_NONBLOCK));
1065 done:
1066 release_sock(sk);
1067 return err;
1070 static int l2cap_sock_listen(struct socket *sock, int backlog)
1072 struct sock *sk = sock->sk;
1073 int err = 0;
1075 BT_DBG("sk %p backlog %d", sk, backlog);
1077 lock_sock(sk);
1079 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1080 err = -EBADFD;
1081 goto done;
1084 switch (l2cap_pi(sk)->mode) {
1085 case L2CAP_MODE_BASIC:
1086 break;
1087 case L2CAP_MODE_ERTM:
1088 case L2CAP_MODE_STREAMING:
1089 if (enable_ertm)
1090 break;
1091 /* fall through */
1092 default:
1093 err = -ENOTSUPP;
1094 goto done;
1097 if (!l2cap_pi(sk)->psm) {
1098 bdaddr_t *src = &bt_sk(sk)->src;
1099 u16 psm;
1101 err = -EINVAL;
1103 write_lock_bh(&l2cap_sk_list.lock);
1105 for (psm = 0x1001; psm < 0x1100; psm += 2)
1106 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1107 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1108 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1109 err = 0;
1110 break;
1113 write_unlock_bh(&l2cap_sk_list.lock);
1115 if (err < 0)
1116 goto done;
1119 sk->sk_max_ack_backlog = backlog;
1120 sk->sk_ack_backlog = 0;
1121 sk->sk_state = BT_LISTEN;
1123 done:
1124 release_sock(sk);
1125 return err;
1128 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1130 DECLARE_WAITQUEUE(wait, current);
1131 struct sock *sk = sock->sk, *nsk;
1132 long timeo;
1133 int err = 0;
1135 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1137 if (sk->sk_state != BT_LISTEN) {
1138 err = -EBADFD;
1139 goto done;
1142 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1144 BT_DBG("sk %p timeo %ld", sk, timeo);
1146 /* Wait for an incoming connection. (wake-one). */
1147 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1148 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1149 set_current_state(TASK_INTERRUPTIBLE);
1150 if (!timeo) {
1151 err = -EAGAIN;
1152 break;
1155 release_sock(sk);
1156 timeo = schedule_timeout(timeo);
1157 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1159 if (sk->sk_state != BT_LISTEN) {
1160 err = -EBADFD;
1161 break;
1164 if (signal_pending(current)) {
1165 err = sock_intr_errno(timeo);
1166 break;
1169 set_current_state(TASK_RUNNING);
1170 remove_wait_queue(sk->sk_sleep, &wait);
1172 if (err)
1173 goto done;
1175 newsock->state = SS_CONNECTED;
1177 BT_DBG("new socket %p", nsk);
1179 done:
1180 release_sock(sk);
1181 return err;
1184 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1186 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1187 struct sock *sk = sock->sk;
1189 BT_DBG("sock %p, sk %p", sock, sk);
1191 addr->sa_family = AF_BLUETOOTH;
1192 *len = sizeof(struct sockaddr_l2);
1194 if (peer) {
1195 la->l2_psm = l2cap_pi(sk)->psm;
1196 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1197 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1198 } else {
1199 la->l2_psm = l2cap_pi(sk)->sport;
1200 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1201 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1204 return 0;
1207 static void l2cap_monitor_timeout(unsigned long arg)
1209 struct sock *sk = (void *) arg;
1210 u16 control;
1212 bh_lock_sock(sk);
1213 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1214 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1215 bh_unlock_sock(sk);
1216 return;
1219 l2cap_pi(sk)->retry_count++;
1220 __mod_monitor_timer();
1222 control = L2CAP_CTRL_POLL;
1223 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1224 bh_unlock_sock(sk);
1227 static void l2cap_retrans_timeout(unsigned long arg)
1229 struct sock *sk = (void *) arg;
1230 u16 control;
1232 bh_lock_sock(sk);
1233 l2cap_pi(sk)->retry_count = 1;
1234 __mod_monitor_timer();
1236 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1238 control = L2CAP_CTRL_POLL;
1239 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1240 bh_unlock_sock(sk);
1243 static void l2cap_drop_acked_frames(struct sock *sk)
1245 struct sk_buff *skb;
1247 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1248 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1249 break;
1251 skb = skb_dequeue(TX_QUEUE(sk));
1252 kfree_skb(skb);
1254 l2cap_pi(sk)->unacked_frames--;
1257 if (!l2cap_pi(sk)->unacked_frames)
1258 del_timer(&l2cap_pi(sk)->retrans_timer);
1260 return;
1263 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1265 struct l2cap_pinfo *pi = l2cap_pi(sk);
1266 int err;
1268 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1270 err = hci_send_acl(pi->conn->hcon, skb, 0);
1271 if (err < 0)
1272 kfree_skb(skb);
1274 return err;
1277 static int l2cap_streaming_send(struct sock *sk)
1279 struct sk_buff *skb, *tx_skb;
1280 struct l2cap_pinfo *pi = l2cap_pi(sk);
1281 u16 control, fcs;
1282 int err;
1284 while ((skb = sk->sk_send_head)) {
1285 tx_skb = skb_clone(skb, GFP_ATOMIC);
1287 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1288 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1289 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1291 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1292 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1293 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1296 err = l2cap_do_send(sk, tx_skb);
1297 if (err < 0) {
1298 l2cap_send_disconn_req(pi->conn, sk);
1299 return err;
1302 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1304 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1305 sk->sk_send_head = NULL;
1306 else
1307 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1309 skb = skb_dequeue(TX_QUEUE(sk));
1310 kfree_skb(skb);
1312 return 0;
1315 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1317 struct l2cap_pinfo *pi = l2cap_pi(sk);
1318 struct sk_buff *skb, *tx_skb;
1319 u16 control, fcs;
1320 int err;
1322 skb = skb_peek(TX_QUEUE(sk));
1323 do {
1324 if (bt_cb(skb)->tx_seq != tx_seq) {
1325 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1326 break;
1327 skb = skb_queue_next(TX_QUEUE(sk), skb);
1328 continue;
1331 if (pi->remote_max_tx &&
1332 bt_cb(skb)->retries == pi->remote_max_tx) {
1333 l2cap_send_disconn_req(pi->conn, sk);
1334 break;
1337 tx_skb = skb_clone(skb, GFP_ATOMIC);
1338 bt_cb(skb)->retries++;
1339 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1340 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1341 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1342 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1344 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1345 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1346 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1349 err = l2cap_do_send(sk, tx_skb);
1350 if (err < 0) {
1351 l2cap_send_disconn_req(pi->conn, sk);
1352 return err;
1354 break;
1355 } while(1);
1356 return 0;
1359 static int l2cap_ertm_send(struct sock *sk)
1361 struct sk_buff *skb, *tx_skb;
1362 struct l2cap_pinfo *pi = l2cap_pi(sk);
1363 u16 control, fcs;
1364 int err;
1366 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1367 return 0;
1369 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1370 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1372 if (pi->remote_max_tx &&
1373 bt_cb(skb)->retries == pi->remote_max_tx) {
1374 l2cap_send_disconn_req(pi->conn, sk);
1375 break;
1378 tx_skb = skb_clone(skb, GFP_ATOMIC);
1380 bt_cb(skb)->retries++;
1382 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1383 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1384 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1385 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1388 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1389 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1390 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1393 err = l2cap_do_send(sk, tx_skb);
1394 if (err < 0) {
1395 l2cap_send_disconn_req(pi->conn, sk);
1396 return err;
1398 __mod_retrans_timer();
1400 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1401 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1403 pi->unacked_frames++;
1405 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1406 sk->sk_send_head = NULL;
1407 else
1408 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1411 return 0;
1414 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1416 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1417 struct sk_buff **frag;
1418 int err, sent = 0;
1420 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1421 return -EFAULT;
1424 sent += count;
1425 len -= count;
1427 /* Continuation fragments (no L2CAP header) */
1428 frag = &skb_shinfo(skb)->frag_list;
1429 while (len) {
1430 count = min_t(unsigned int, conn->mtu, len);
1432 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1433 if (!*frag)
1434 return -EFAULT;
1435 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1436 return -EFAULT;
1438 sent += count;
1439 len -= count;
1441 frag = &(*frag)->next;
1444 return sent;
1447 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1449 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1450 struct sk_buff *skb;
1451 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1452 struct l2cap_hdr *lh;
1454 BT_DBG("sk %p len %d", sk, (int)len);
1456 count = min_t(unsigned int, (conn->mtu - hlen), len);
1457 skb = bt_skb_send_alloc(sk, count + hlen,
1458 msg->msg_flags & MSG_DONTWAIT, &err);
1459 if (!skb)
1460 return ERR_PTR(-ENOMEM);
1462 /* Create L2CAP header */
1463 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1464 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1465 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1466 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1468 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1469 if (unlikely(err < 0)) {
1470 kfree_skb(skb);
1471 return ERR_PTR(err);
1473 return skb;
1476 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1478 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1479 struct sk_buff *skb;
1480 int err, count, hlen = L2CAP_HDR_SIZE;
1481 struct l2cap_hdr *lh;
1483 BT_DBG("sk %p len %d", sk, (int)len);
1485 count = min_t(unsigned int, (conn->mtu - hlen), len);
1486 skb = bt_skb_send_alloc(sk, count + hlen,
1487 msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (!skb)
1489 return ERR_PTR(-ENOMEM);
1491 /* Create L2CAP header */
1492 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1493 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1494 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1496 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1497 if (unlikely(err < 0)) {
1498 kfree_skb(skb);
1499 return ERR_PTR(err);
1501 return skb;
1504 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1506 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1507 struct sk_buff *skb;
1508 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1509 struct l2cap_hdr *lh;
1511 BT_DBG("sk %p len %d", sk, (int)len);
1513 if (sdulen)
1514 hlen += 2;
1516 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1517 hlen += 2;
1519 count = min_t(unsigned int, (conn->mtu - hlen), len);
1520 skb = bt_skb_send_alloc(sk, count + hlen,
1521 msg->msg_flags & MSG_DONTWAIT, &err);
1522 if (!skb)
1523 return ERR_PTR(-ENOMEM);
1525 /* Create L2CAP header */
1526 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1527 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1528 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1529 put_unaligned_le16(control, skb_put(skb, 2));
1530 if (sdulen)
1531 put_unaligned_le16(sdulen, skb_put(skb, 2));
1533 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1534 if (unlikely(err < 0)) {
1535 kfree_skb(skb);
1536 return ERR_PTR(err);
1539 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1540 put_unaligned_le16(0, skb_put(skb, 2));
1542 bt_cb(skb)->retries = 0;
1543 return skb;
1546 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1548 struct l2cap_pinfo *pi = l2cap_pi(sk);
1549 struct sk_buff *skb;
1550 struct sk_buff_head sar_queue;
1551 u16 control;
1552 size_t size = 0;
1554 __skb_queue_head_init(&sar_queue);
1555 control = L2CAP_SDU_START;
1556 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1557 if (IS_ERR(skb))
1558 return PTR_ERR(skb);
1560 __skb_queue_tail(&sar_queue, skb);
1561 len -= pi->max_pdu_size;
1562 size +=pi->max_pdu_size;
1563 control = 0;
1565 while (len > 0) {
1566 size_t buflen;
1568 if (len > pi->max_pdu_size) {
1569 control |= L2CAP_SDU_CONTINUE;
1570 buflen = pi->max_pdu_size;
1571 } else {
1572 control |= L2CAP_SDU_END;
1573 buflen = len;
1576 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1577 if (IS_ERR(skb)) {
1578 skb_queue_purge(&sar_queue);
1579 return PTR_ERR(skb);
1582 __skb_queue_tail(&sar_queue, skb);
1583 len -= buflen;
1584 size += buflen;
1585 control = 0;
1587 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1588 if (sk->sk_send_head == NULL)
1589 sk->sk_send_head = sar_queue.next;
1591 return size;
1594 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1596 struct sock *sk = sock->sk;
1597 struct l2cap_pinfo *pi = l2cap_pi(sk);
1598 struct sk_buff *skb;
1599 u16 control;
1600 int err;
1602 BT_DBG("sock %p, sk %p", sock, sk);
1604 err = sock_error(sk);
1605 if (err)
1606 return err;
1608 if (msg->msg_flags & MSG_OOB)
1609 return -EOPNOTSUPP;
1611 /* Check outgoing MTU */
1612 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1613 len > pi->omtu)
1614 return -EINVAL;
1616 lock_sock(sk);
1618 if (sk->sk_state != BT_CONNECTED) {
1619 err = -ENOTCONN;
1620 goto done;
1623 /* Connectionless channel */
1624 if (sk->sk_type == SOCK_DGRAM) {
1625 skb = l2cap_create_connless_pdu(sk, msg, len);
1626 err = l2cap_do_send(sk, skb);
1627 goto done;
1630 switch (pi->mode) {
1631 case L2CAP_MODE_BASIC:
1632 /* Create a basic PDU */
1633 skb = l2cap_create_basic_pdu(sk, msg, len);
1634 if (IS_ERR(skb)) {
1635 err = PTR_ERR(skb);
1636 goto done;
1639 err = l2cap_do_send(sk, skb);
1640 if (!err)
1641 err = len;
1642 break;
1644 case L2CAP_MODE_ERTM:
1645 case L2CAP_MODE_STREAMING:
1646 /* Entire SDU fits into one PDU */
1647 if (len <= pi->max_pdu_size) {
1648 control = L2CAP_SDU_UNSEGMENTED;
1649 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1650 if (IS_ERR(skb)) {
1651 err = PTR_ERR(skb);
1652 goto done;
1654 __skb_queue_tail(TX_QUEUE(sk), skb);
1655 if (sk->sk_send_head == NULL)
1656 sk->sk_send_head = skb;
1657 } else {
1658 /* Segment SDU into multiples PDUs */
1659 err = l2cap_sar_segment_sdu(sk, msg, len);
1660 if (err < 0)
1661 goto done;
1664 if (pi->mode == L2CAP_MODE_STREAMING)
1665 err = l2cap_streaming_send(sk);
1666 else
1667 err = l2cap_ertm_send(sk);
1669 if (!err)
1670 err = len;
1671 break;
1673 default:
1674 BT_DBG("bad state %1.1x", pi->mode);
1675 err = -EINVAL;
1678 done:
1679 release_sock(sk);
1680 return err;
1683 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1685 struct sock *sk = sock->sk;
1687 lock_sock(sk);
1689 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1690 struct l2cap_conn_rsp rsp;
1692 sk->sk_state = BT_CONFIG;
1694 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1695 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1696 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1697 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1698 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1699 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1701 release_sock(sk);
1702 return 0;
1705 release_sock(sk);
1707 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1710 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1712 struct sock *sk = sock->sk;
1713 struct l2cap_options opts;
1714 int len, err = 0;
1715 u32 opt;
1717 BT_DBG("sk %p", sk);
1719 lock_sock(sk);
1721 switch (optname) {
1722 case L2CAP_OPTIONS:
1723 opts.imtu = l2cap_pi(sk)->imtu;
1724 opts.omtu = l2cap_pi(sk)->omtu;
1725 opts.flush_to = l2cap_pi(sk)->flush_to;
1726 opts.mode = l2cap_pi(sk)->mode;
1727 opts.fcs = l2cap_pi(sk)->fcs;
1729 len = min_t(unsigned int, sizeof(opts), optlen);
1730 if (copy_from_user((char *) &opts, optval, len)) {
1731 err = -EFAULT;
1732 break;
1735 l2cap_pi(sk)->imtu = opts.imtu;
1736 l2cap_pi(sk)->omtu = opts.omtu;
1737 l2cap_pi(sk)->mode = opts.mode;
1738 l2cap_pi(sk)->fcs = opts.fcs;
1739 break;
1741 case L2CAP_LM:
1742 if (get_user(opt, (u32 __user *) optval)) {
1743 err = -EFAULT;
1744 break;
1747 if (opt & L2CAP_LM_AUTH)
1748 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1749 if (opt & L2CAP_LM_ENCRYPT)
1750 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1751 if (opt & L2CAP_LM_SECURE)
1752 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1754 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1755 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1756 break;
1758 default:
1759 err = -ENOPROTOOPT;
1760 break;
1763 release_sock(sk);
1764 return err;
1767 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1769 struct sock *sk = sock->sk;
1770 struct bt_security sec;
1771 int len, err = 0;
1772 u32 opt;
1774 BT_DBG("sk %p", sk);
1776 if (level == SOL_L2CAP)
1777 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1779 if (level != SOL_BLUETOOTH)
1780 return -ENOPROTOOPT;
1782 lock_sock(sk);
1784 switch (optname) {
1785 case BT_SECURITY:
1786 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1787 err = -EINVAL;
1788 break;
1791 sec.level = BT_SECURITY_LOW;
1793 len = min_t(unsigned int, sizeof(sec), optlen);
1794 if (copy_from_user((char *) &sec, optval, len)) {
1795 err = -EFAULT;
1796 break;
1799 if (sec.level < BT_SECURITY_LOW ||
1800 sec.level > BT_SECURITY_HIGH) {
1801 err = -EINVAL;
1802 break;
1805 l2cap_pi(sk)->sec_level = sec.level;
1806 break;
1808 case BT_DEFER_SETUP:
1809 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1810 err = -EINVAL;
1811 break;
1814 if (get_user(opt, (u32 __user *) optval)) {
1815 err = -EFAULT;
1816 break;
1819 bt_sk(sk)->defer_setup = opt;
1820 break;
1822 default:
1823 err = -ENOPROTOOPT;
1824 break;
1827 release_sock(sk);
1828 return err;
1831 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1833 struct sock *sk = sock->sk;
1834 struct l2cap_options opts;
1835 struct l2cap_conninfo cinfo;
1836 int len, err = 0;
1837 u32 opt;
1839 BT_DBG("sk %p", sk);
1841 if (get_user(len, optlen))
1842 return -EFAULT;
1844 lock_sock(sk);
1846 switch (optname) {
1847 case L2CAP_OPTIONS:
1848 opts.imtu = l2cap_pi(sk)->imtu;
1849 opts.omtu = l2cap_pi(sk)->omtu;
1850 opts.flush_to = l2cap_pi(sk)->flush_to;
1851 opts.mode = l2cap_pi(sk)->mode;
1852 opts.fcs = l2cap_pi(sk)->fcs;
1854 len = min_t(unsigned int, len, sizeof(opts));
1855 if (copy_to_user(optval, (char *) &opts, len))
1856 err = -EFAULT;
1858 break;
1860 case L2CAP_LM:
1861 switch (l2cap_pi(sk)->sec_level) {
1862 case BT_SECURITY_LOW:
1863 opt = L2CAP_LM_AUTH;
1864 break;
1865 case BT_SECURITY_MEDIUM:
1866 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1867 break;
1868 case BT_SECURITY_HIGH:
1869 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1870 L2CAP_LM_SECURE;
1871 break;
1872 default:
1873 opt = 0;
1874 break;
1877 if (l2cap_pi(sk)->role_switch)
1878 opt |= L2CAP_LM_MASTER;
1880 if (l2cap_pi(sk)->force_reliable)
1881 opt |= L2CAP_LM_RELIABLE;
1883 if (put_user(opt, (u32 __user *) optval))
1884 err = -EFAULT;
1885 break;
1887 case L2CAP_CONNINFO:
1888 if (sk->sk_state != BT_CONNECTED &&
1889 !(sk->sk_state == BT_CONNECT2 &&
1890 bt_sk(sk)->defer_setup)) {
1891 err = -ENOTCONN;
1892 break;
1895 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1896 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1898 len = min_t(unsigned int, len, sizeof(cinfo));
1899 if (copy_to_user(optval, (char *) &cinfo, len))
1900 err = -EFAULT;
1902 break;
1904 default:
1905 err = -ENOPROTOOPT;
1906 break;
1909 release_sock(sk);
1910 return err;
1913 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1915 struct sock *sk = sock->sk;
1916 struct bt_security sec;
1917 int len, err = 0;
1919 BT_DBG("sk %p", sk);
1921 if (level == SOL_L2CAP)
1922 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1924 if (level != SOL_BLUETOOTH)
1925 return -ENOPROTOOPT;
1927 if (get_user(len, optlen))
1928 return -EFAULT;
1930 lock_sock(sk);
1932 switch (optname) {
1933 case BT_SECURITY:
1934 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1935 err = -EINVAL;
1936 break;
1939 sec.level = l2cap_pi(sk)->sec_level;
1941 len = min_t(unsigned int, len, sizeof(sec));
1942 if (copy_to_user(optval, (char *) &sec, len))
1943 err = -EFAULT;
1945 break;
1947 case BT_DEFER_SETUP:
1948 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1949 err = -EINVAL;
1950 break;
1953 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1954 err = -EFAULT;
1956 break;
1958 default:
1959 err = -ENOPROTOOPT;
1960 break;
1963 release_sock(sk);
1964 return err;
1967 static int l2cap_sock_shutdown(struct socket *sock, int how)
1969 struct sock *sk = sock->sk;
1970 int err = 0;
1972 BT_DBG("sock %p, sk %p", sock, sk);
1974 if (!sk)
1975 return 0;
1977 lock_sock(sk);
1978 if (!sk->sk_shutdown) {
1979 sk->sk_shutdown = SHUTDOWN_MASK;
1980 l2cap_sock_clear_timer(sk);
1981 __l2cap_sock_close(sk, 0);
1983 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1984 err = bt_sock_wait_state(sk, BT_CLOSED,
1985 sk->sk_lingertime);
1987 release_sock(sk);
1988 return err;
1991 static int l2cap_sock_release(struct socket *sock)
1993 struct sock *sk = sock->sk;
1994 int err;
1996 BT_DBG("sock %p, sk %p", sock, sk);
1998 if (!sk)
1999 return 0;
2001 err = l2cap_sock_shutdown(sock, 2);
2003 sock_orphan(sk);
2004 l2cap_sock_kill(sk);
2005 return err;
2008 static void l2cap_chan_ready(struct sock *sk)
2010 struct sock *parent = bt_sk(sk)->parent;
2012 BT_DBG("sk %p, parent %p", sk, parent);
2014 l2cap_pi(sk)->conf_state = 0;
2015 l2cap_sock_clear_timer(sk);
2017 if (!parent) {
2018 /* Outgoing channel.
2019 * Wake up socket sleeping on connect.
2021 sk->sk_state = BT_CONNECTED;
2022 sk->sk_state_change(sk);
2023 } else {
2024 /* Incoming channel.
2025 * Wake up socket sleeping on accept.
2027 parent->sk_data_ready(parent, 0);
2031 /* Copy frame to all raw sockets on that connection */
2032 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2034 struct l2cap_chan_list *l = &conn->chan_list;
2035 struct sk_buff *nskb;
2036 struct sock *sk;
2038 BT_DBG("conn %p", conn);
2040 read_lock(&l->lock);
2041 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2042 if (sk->sk_type != SOCK_RAW)
2043 continue;
2045 /* Don't send frame to the socket it came from */
2046 if (skb->sk == sk)
2047 continue;
2048 nskb = skb_clone(skb, GFP_ATOMIC);
2049 if (!nskb)
2050 continue;
2052 if (sock_queue_rcv_skb(sk, nskb))
2053 kfree_skb(nskb);
2055 read_unlock(&l->lock);
2058 /* ---- L2CAP signalling commands ---- */
2059 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2060 u8 code, u8 ident, u16 dlen, void *data)
2062 struct sk_buff *skb, **frag;
2063 struct l2cap_cmd_hdr *cmd;
2064 struct l2cap_hdr *lh;
2065 int len, count;
2067 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2068 conn, code, ident, dlen);
2070 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2071 count = min_t(unsigned int, conn->mtu, len);
2073 skb = bt_skb_alloc(count, GFP_ATOMIC);
2074 if (!skb)
2075 return NULL;
2077 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2078 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2079 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2081 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2082 cmd->code = code;
2083 cmd->ident = ident;
2084 cmd->len = cpu_to_le16(dlen);
2086 if (dlen) {
2087 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2088 memcpy(skb_put(skb, count), data, count);
2089 data += count;
2092 len -= skb->len;
2094 /* Continuation fragments (no L2CAP header) */
2095 frag = &skb_shinfo(skb)->frag_list;
2096 while (len) {
2097 count = min_t(unsigned int, conn->mtu, len);
2099 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2100 if (!*frag)
2101 goto fail;
2103 memcpy(skb_put(*frag, count), data, count);
2105 len -= count;
2106 data += count;
2108 frag = &(*frag)->next;
2111 return skb;
2113 fail:
2114 kfree_skb(skb);
2115 return NULL;
2118 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2120 struct l2cap_conf_opt *opt = *ptr;
2121 int len;
2123 len = L2CAP_CONF_OPT_SIZE + opt->len;
2124 *ptr += len;
2126 *type = opt->type;
2127 *olen = opt->len;
2129 switch (opt->len) {
2130 case 1:
2131 *val = *((u8 *) opt->val);
2132 break;
2134 case 2:
2135 *val = __le16_to_cpu(*((__le16 *) opt->val));
2136 break;
2138 case 4:
2139 *val = __le32_to_cpu(*((__le32 *) opt->val));
2140 break;
2142 default:
2143 *val = (unsigned long) opt->val;
2144 break;
2147 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2148 return len;
2151 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2153 struct l2cap_conf_opt *opt = *ptr;
2155 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2157 opt->type = type;
2158 opt->len = len;
2160 switch (len) {
2161 case 1:
2162 *((u8 *) opt->val) = val;
2163 break;
2165 case 2:
2166 *((__le16 *) opt->val) = cpu_to_le16(val);
2167 break;
2169 case 4:
2170 *((__le32 *) opt->val) = cpu_to_le32(val);
2171 break;
2173 default:
2174 memcpy(opt->val, (void *) val, len);
2175 break;
2178 *ptr += L2CAP_CONF_OPT_SIZE + len;
2181 static inline void l2cap_ertm_init(struct sock *sk)
2183 l2cap_pi(sk)->expected_ack_seq = 0;
2184 l2cap_pi(sk)->unacked_frames = 0;
2185 l2cap_pi(sk)->buffer_seq = 0;
2186 l2cap_pi(sk)->num_to_ack = 0;
2188 setup_timer(&l2cap_pi(sk)->retrans_timer,
2189 l2cap_retrans_timeout, (unsigned long) sk);
2190 setup_timer(&l2cap_pi(sk)->monitor_timer,
2191 l2cap_monitor_timeout, (unsigned long) sk);
2193 __skb_queue_head_init(SREJ_QUEUE(sk));
2196 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2198 u32 local_feat_mask = l2cap_feat_mask;
2199 if (enable_ertm)
2200 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2202 switch (mode) {
2203 case L2CAP_MODE_ERTM:
2204 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2205 case L2CAP_MODE_STREAMING:
2206 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2207 default:
2208 return 0x00;
2212 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2214 switch (mode) {
2215 case L2CAP_MODE_STREAMING:
2216 case L2CAP_MODE_ERTM:
2217 if (l2cap_mode_supported(mode, remote_feat_mask))
2218 return mode;
2219 /* fall through */
2220 default:
2221 return L2CAP_MODE_BASIC;
2225 static int l2cap_build_conf_req(struct sock *sk, void *data)
2227 struct l2cap_pinfo *pi = l2cap_pi(sk);
2228 struct l2cap_conf_req *req = data;
2229 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2230 void *ptr = req->data;
2232 BT_DBG("sk %p", sk);
2234 if (pi->num_conf_req || pi->num_conf_rsp)
2235 goto done;
2237 switch (pi->mode) {
2238 case L2CAP_MODE_STREAMING:
2239 case L2CAP_MODE_ERTM:
2240 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2241 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2242 l2cap_send_disconn_req(pi->conn, sk);
2243 break;
2244 default:
2245 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2246 break;
2249 done:
2250 switch (pi->mode) {
2251 case L2CAP_MODE_BASIC:
2252 if (pi->imtu != L2CAP_DEFAULT_MTU)
2253 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2254 break;
2256 case L2CAP_MODE_ERTM:
2257 rfc.mode = L2CAP_MODE_ERTM;
2258 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2259 rfc.max_transmit = max_transmit;
2260 rfc.retrans_timeout = 0;
2261 rfc.monitor_timeout = 0;
2262 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2264 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2265 sizeof(rfc), (unsigned long) &rfc);
2267 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2268 break;
2270 if (pi->fcs == L2CAP_FCS_NONE ||
2271 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2272 pi->fcs = L2CAP_FCS_NONE;
2273 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2275 break;
2277 case L2CAP_MODE_STREAMING:
2278 rfc.mode = L2CAP_MODE_STREAMING;
2279 rfc.txwin_size = 0;
2280 rfc.max_transmit = 0;
2281 rfc.retrans_timeout = 0;
2282 rfc.monitor_timeout = 0;
2283 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2285 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2286 sizeof(rfc), (unsigned long) &rfc);
2288 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2289 break;
2291 if (pi->fcs == L2CAP_FCS_NONE ||
2292 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2293 pi->fcs = L2CAP_FCS_NONE;
2294 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2296 break;
2299 /* FIXME: Need actual value of the flush timeout */
2300 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2301 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2303 req->dcid = cpu_to_le16(pi->dcid);
2304 req->flags = cpu_to_le16(0);
2306 return ptr - data;
2309 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2311 struct l2cap_pinfo *pi = l2cap_pi(sk);
2312 struct l2cap_conf_rsp *rsp = data;
2313 void *ptr = rsp->data;
2314 void *req = pi->conf_req;
2315 int len = pi->conf_len;
2316 int type, hint, olen;
2317 unsigned long val;
2318 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2319 u16 mtu = L2CAP_DEFAULT_MTU;
2320 u16 result = L2CAP_CONF_SUCCESS;
2322 BT_DBG("sk %p", sk);
2324 while (len >= L2CAP_CONF_OPT_SIZE) {
2325 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2327 hint = type & L2CAP_CONF_HINT;
2328 type &= L2CAP_CONF_MASK;
2330 switch (type) {
2331 case L2CAP_CONF_MTU:
2332 mtu = val;
2333 break;
2335 case L2CAP_CONF_FLUSH_TO:
2336 pi->flush_to = val;
2337 break;
2339 case L2CAP_CONF_QOS:
2340 break;
2342 case L2CAP_CONF_RFC:
2343 if (olen == sizeof(rfc))
2344 memcpy(&rfc, (void *) val, olen);
2345 break;
2347 case L2CAP_CONF_FCS:
2348 if (val == L2CAP_FCS_NONE)
2349 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2351 break;
2353 default:
2354 if (hint)
2355 break;
2357 result = L2CAP_CONF_UNKNOWN;
2358 *((u8 *) ptr++) = type;
2359 break;
2363 if (pi->num_conf_rsp || pi->num_conf_req)
2364 goto done;
2366 switch (pi->mode) {
2367 case L2CAP_MODE_STREAMING:
2368 case L2CAP_MODE_ERTM:
2369 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2370 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2371 return -ECONNREFUSED;
2372 break;
2373 default:
2374 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2375 break;
2378 done:
2379 if (pi->mode != rfc.mode) {
2380 result = L2CAP_CONF_UNACCEPT;
2381 rfc.mode = pi->mode;
2383 if (pi->num_conf_rsp == 1)
2384 return -ECONNREFUSED;
2386 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2387 sizeof(rfc), (unsigned long) &rfc);
2391 if (result == L2CAP_CONF_SUCCESS) {
2392 /* Configure output options and let the other side know
2393 * which ones we don't like. */
2395 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2396 result = L2CAP_CONF_UNACCEPT;
2397 else {
2398 pi->omtu = mtu;
2399 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2403 switch (rfc.mode) {
2404 case L2CAP_MODE_BASIC:
2405 pi->fcs = L2CAP_FCS_NONE;
2406 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2407 break;
2409 case L2CAP_MODE_ERTM:
2410 pi->remote_tx_win = rfc.txwin_size;
2411 pi->remote_max_tx = rfc.max_transmit;
2412 pi->max_pdu_size = rfc.max_pdu_size;
2414 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2415 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2417 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2419 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2420 sizeof(rfc), (unsigned long) &rfc);
2422 break;
2424 case L2CAP_MODE_STREAMING:
2425 pi->remote_tx_win = rfc.txwin_size;
2426 pi->max_pdu_size = rfc.max_pdu_size;
2428 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2431 sizeof(rfc), (unsigned long) &rfc);
2433 break;
2435 default:
2436 result = L2CAP_CONF_UNACCEPT;
2438 memset(&rfc, 0, sizeof(rfc));
2439 rfc.mode = pi->mode;
2442 if (result == L2CAP_CONF_SUCCESS)
2443 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2445 rsp->scid = cpu_to_le16(pi->dcid);
2446 rsp->result = cpu_to_le16(result);
2447 rsp->flags = cpu_to_le16(0x0000);
2449 return ptr - data;
2452 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2454 struct l2cap_pinfo *pi = l2cap_pi(sk);
2455 struct l2cap_conf_req *req = data;
2456 void *ptr = req->data;
2457 int type, olen;
2458 unsigned long val;
2459 struct l2cap_conf_rfc rfc;
2461 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2463 while (len >= L2CAP_CONF_OPT_SIZE) {
2464 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2466 switch (type) {
2467 case L2CAP_CONF_MTU:
2468 if (val < L2CAP_DEFAULT_MIN_MTU) {
2469 *result = L2CAP_CONF_UNACCEPT;
2470 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2471 } else
2472 pi->omtu = val;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2474 break;
2476 case L2CAP_CONF_FLUSH_TO:
2477 pi->flush_to = val;
2478 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2479 2, pi->flush_to);
2480 break;
2482 case L2CAP_CONF_RFC:
2483 if (olen == sizeof(rfc))
2484 memcpy(&rfc, (void *)val, olen);
2486 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2487 rfc.mode != pi->mode)
2488 return -ECONNREFUSED;
2490 pi->mode = rfc.mode;
2491 pi->fcs = 0;
2493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2494 sizeof(rfc), (unsigned long) &rfc);
2495 break;
2499 if (*result == L2CAP_CONF_SUCCESS) {
2500 switch (rfc.mode) {
2501 case L2CAP_MODE_ERTM:
2502 pi->remote_tx_win = rfc.txwin_size;
2503 pi->retrans_timeout = rfc.retrans_timeout;
2504 pi->monitor_timeout = rfc.monitor_timeout;
2505 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2506 break;
2507 case L2CAP_MODE_STREAMING:
2508 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2509 break;
2513 req->dcid = cpu_to_le16(pi->dcid);
2514 req->flags = cpu_to_le16(0x0000);
2516 return ptr - data;
2519 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2521 struct l2cap_conf_rsp *rsp = data;
2522 void *ptr = rsp->data;
2524 BT_DBG("sk %p", sk);
2526 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2527 rsp->result = cpu_to_le16(result);
2528 rsp->flags = cpu_to_le16(flags);
2530 return ptr - data;
2533 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2535 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2537 if (rej->reason != 0x0000)
2538 return 0;
2540 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2541 cmd->ident == conn->info_ident) {
2542 del_timer(&conn->info_timer);
2544 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2545 conn->info_ident = 0;
2547 l2cap_conn_start(conn);
2550 return 0;
2553 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2555 struct l2cap_chan_list *list = &conn->chan_list;
2556 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2557 struct l2cap_conn_rsp rsp;
2558 struct sock *sk, *parent;
2559 int result, status = L2CAP_CS_NO_INFO;
2561 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2562 __le16 psm = req->psm;
2564 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2566 /* Check if we have socket listening on psm */
2567 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2568 if (!parent) {
2569 result = L2CAP_CR_BAD_PSM;
2570 goto sendresp;
2573 /* Check if the ACL is secure enough (if not SDP) */
2574 if (psm != cpu_to_le16(0x0001) &&
2575 !hci_conn_check_link_mode(conn->hcon)) {
2576 conn->disc_reason = 0x05;
2577 result = L2CAP_CR_SEC_BLOCK;
2578 goto response;
2581 result = L2CAP_CR_NO_MEM;
2583 /* Check for backlog size */
2584 if (sk_acceptq_is_full(parent)) {
2585 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2586 goto response;
2589 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2590 if (!sk)
2591 goto response;
2593 write_lock_bh(&list->lock);
2595 /* Check if we already have channel with that dcid */
2596 if (__l2cap_get_chan_by_dcid(list, scid)) {
2597 write_unlock_bh(&list->lock);
2598 sock_set_flag(sk, SOCK_ZAPPED);
2599 l2cap_sock_kill(sk);
2600 goto response;
2603 hci_conn_hold(conn->hcon);
2605 l2cap_sock_init(sk, parent);
2606 bacpy(&bt_sk(sk)->src, conn->src);
2607 bacpy(&bt_sk(sk)->dst, conn->dst);
2608 l2cap_pi(sk)->psm = psm;
2609 l2cap_pi(sk)->dcid = scid;
2611 __l2cap_chan_add(conn, sk, parent);
2612 dcid = l2cap_pi(sk)->scid;
2614 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2616 l2cap_pi(sk)->ident = cmd->ident;
2618 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2619 if (l2cap_check_security(sk)) {
2620 if (bt_sk(sk)->defer_setup) {
2621 sk->sk_state = BT_CONNECT2;
2622 result = L2CAP_CR_PEND;
2623 status = L2CAP_CS_AUTHOR_PEND;
2624 parent->sk_data_ready(parent, 0);
2625 } else {
2626 sk->sk_state = BT_CONFIG;
2627 result = L2CAP_CR_SUCCESS;
2628 status = L2CAP_CS_NO_INFO;
2630 } else {
2631 sk->sk_state = BT_CONNECT2;
2632 result = L2CAP_CR_PEND;
2633 status = L2CAP_CS_AUTHEN_PEND;
2635 } else {
2636 sk->sk_state = BT_CONNECT2;
2637 result = L2CAP_CR_PEND;
2638 status = L2CAP_CS_NO_INFO;
2641 write_unlock_bh(&list->lock);
2643 response:
2644 bh_unlock_sock(parent);
2646 sendresp:
2647 rsp.scid = cpu_to_le16(scid);
2648 rsp.dcid = cpu_to_le16(dcid);
2649 rsp.result = cpu_to_le16(result);
2650 rsp.status = cpu_to_le16(status);
2651 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2653 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2654 struct l2cap_info_req info;
2655 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2657 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2658 conn->info_ident = l2cap_get_ident(conn);
2660 mod_timer(&conn->info_timer, jiffies +
2661 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2663 l2cap_send_cmd(conn, conn->info_ident,
2664 L2CAP_INFO_REQ, sizeof(info), &info);
2667 return 0;
/* Handle an incoming L2CAP Connect Response.
 *
 * The channel is located either by our source CID (once the peer has
 * echoed it) or, for a pending response, by the signalling ident of the
 * original Connect Request.  Both lookup helpers return the socket with
 * bh_lock_sock() held, hence the bh_unlock_sock() on exit.
 *
 * Returns 0 in all cases; responses for unknown channels are ignored.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		/* Peer echoed our source CID: normal lookup. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		/* No CID assigned yet (pending result): match on ident. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: move to configuration phase and
		 * immediately send our Configure Request. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer is still deciding (e.g. authorization pending). */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Refused: tear the channel down. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle an incoming L2CAP Configure Request.
 *
 * Config options may be split over several requests (continuation flag
 * 0x0001); partial payloads are accumulated in conf_req/conf_len until
 * the final fragment arrives, then parsed as a whole.  When both
 * directions have finished configuring (OUTPUT_DONE and INPUT_DONE) the
 * channel transitions to BT_CONNECTED.
 *
 * Returns 0 on success, -ENOENT when the destination CID is unknown
 * (the caller then sends a Command Reject).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup returns the socket locked; released at "unlock". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unparseable/unacceptable options: give up on the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both sides configured: enable CRC16 FCS unless both
		 * agreed to turn it off, then bring the channel up. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own Configure Request yet: do it now. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2807 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2809 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2810 u16 scid, flags, result;
2811 struct sock *sk;
2813 scid = __le16_to_cpu(rsp->scid);
2814 flags = __le16_to_cpu(rsp->flags);
2815 result = __le16_to_cpu(rsp->result);
2817 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2818 scid, flags, result);
2820 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2821 if (!sk)
2822 return 0;
2824 switch (result) {
2825 case L2CAP_CONF_SUCCESS:
2826 break;
2828 case L2CAP_CONF_UNACCEPT:
2829 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2830 int len = cmd->len - sizeof(*rsp);
2831 char req[64];
2833 /* throw out any old stored conf requests */
2834 result = L2CAP_CONF_SUCCESS;
2835 len = l2cap_parse_conf_rsp(sk, rsp->data,
2836 len, req, &result);
2837 if (len < 0) {
2838 l2cap_send_disconn_req(conn, sk);
2839 goto done;
2842 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2843 L2CAP_CONF_REQ, len, req);
2844 l2cap_pi(sk)->num_conf_req++;
2845 if (result != L2CAP_CONF_SUCCESS)
2846 goto done;
2847 break;
2850 default:
2851 sk->sk_state = BT_DISCONN;
2852 sk->sk_err = ECONNRESET;
2853 l2cap_sock_set_timer(sk, HZ * 5);
2854 l2cap_send_disconn_req(conn, sk);
2855 goto done;
2858 if (flags & 0x01)
2859 goto done;
2861 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2863 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2864 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2865 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2866 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2868 sk->sk_state = BT_CONNECTED;
2869 l2cap_pi(sk)->next_tx_seq = 0;
2870 l2cap_pi(sk)->expected_tx_seq = 0;
2871 __skb_queue_head_init(TX_QUEUE(sk));
2872 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2873 l2cap_ertm_init(sk);
2875 l2cap_chan_ready(sk);
2878 done:
2879 bh_unlock_sock(sk);
2880 return 0;
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response, then tears down the local
 * channel: pending transmissions are purged, ERTM timers stopped, the
 * channel unlinked and the socket killed if it has no remaining user.
 *
 * Returns 0; requests for unknown channels are ignored.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup returns the socket locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Acknowledge before dismantling local state. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop ERTM machinery: no point retransmitting into a
		 * channel that is going away. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	/* Frees the socket when it is zapped and unreferenced. */
	l2cap_sock_kill(sk);
	return 0;
}
2920 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2922 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2923 u16 dcid, scid;
2924 struct sock *sk;
2926 scid = __le16_to_cpu(rsp->scid);
2927 dcid = __le16_to_cpu(rsp->dcid);
2929 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2931 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2932 if (!sk)
2933 return 0;
2935 skb_queue_purge(TX_QUEUE(sk));
2937 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2938 skb_queue_purge(SREJ_QUEUE(sk));
2939 del_timer(&l2cap_pi(sk)->retrans_timer);
2940 del_timer(&l2cap_pi(sk)->monitor_timer);
2943 l2cap_chan_del(sk, 0);
2944 bh_unlock_sock(sk);
2946 l2cap_sock_kill(sk);
2947 return 0;
2950 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2952 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2953 u16 type;
2955 type = __le16_to_cpu(req->type);
2957 BT_DBG("type 0x%4.4x", type);
2959 if (type == L2CAP_IT_FEAT_MASK) {
2960 u8 buf[8];
2961 u32 feat_mask = l2cap_feat_mask;
2962 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2963 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2964 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2965 if (enable_ertm)
2966 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2967 | L2CAP_FEAT_FCS;
2968 put_unaligned_le32(feat_mask, rsp->data);
2969 l2cap_send_cmd(conn, cmd->ident,
2970 L2CAP_INFO_RSP, sizeof(buf), buf);
2971 } else if (type == L2CAP_IT_FIXED_CHAN) {
2972 u8 buf[12];
2973 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2974 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2975 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2976 memcpy(buf + 4, l2cap_fixed_chan, 8);
2977 l2cap_send_cmd(conn, cmd->ident,
2978 L2CAP_INFO_RSP, sizeof(buf), buf);
2979 } else {
2980 struct l2cap_info_rsp rsp;
2981 rsp.type = cpu_to_le16(type);
2982 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2983 l2cap_send_cmd(conn, cmd->ident,
2984 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2987 return 0;
2990 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2992 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2993 u16 type, result;
2995 type = __le16_to_cpu(rsp->type);
2996 result = __le16_to_cpu(rsp->result);
2998 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3000 del_timer(&conn->info_timer);
3002 if (type == L2CAP_IT_FEAT_MASK) {
3003 conn->feat_mask = get_unaligned_le32(rsp->data);
3005 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3006 struct l2cap_info_req req;
3007 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3009 conn->info_ident = l2cap_get_ident(conn);
3011 l2cap_send_cmd(conn, conn->info_ident,
3012 L2CAP_INFO_REQ, sizeof(req), &req);
3013 } else {
3014 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3015 conn->info_ident = 0;
3017 l2cap_conn_start(conn);
3019 } else if (type == L2CAP_IT_FIXED_CHAN) {
3020 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3021 conn->info_ident = 0;
3023 l2cap_conn_start(conn);
3026 return 0;
/* Process the signalling channel (CID 0x0001) of one received PDU.
 *
 * A signalling PDU may carry several commands back to back; each is a
 * command header followed by cmd.len bytes of payload.  Malformed input
 * (payload longer than what remains, or a zero ident) aborts parsing.
 * Commands the dispatcher returns an error for are answered with a
 * Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify and strip the CRC16 FCS trailer of an ERTM/streaming frame.
 *
 * The CRC covers hdr_size bytes sitting immediately before skb->data
 * (basic L2CAP header + control field — the control field was already
 * pulled by the caller) plus the payload.  skb_trim() only shortens
 * skb->len, so the two FCS bytes remain readable just past the trimmed
 * payload.
 *
 * Returns 0 when the FCS matches or FCS is not in use, -EINVAL on a
 * corrupted frame.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;	/* basic header + control field */

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);	/* drop FCS from payload length */
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq so l2cap_check_srej_gap() can drain it in order.
 * The sequence number and SAR bits are stashed in the skb control block
 * for later reassembly.
 *
 * NOTE(review): the comparison uses raw 6-bit sequence numbers with ">",
 * so ordering across a sequence-number wrap looks suspect — confirm
 * against the ERTM receive-window handling.
 */
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		/* Empty queue: trivial append. */
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			/* First entry with a larger seq: insert before it. */
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Larger than everything queued: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
/* Reassemble SDUs from SAR-segmented I-frames and deliver them to the
 * socket receive queue.
 *
 * The SAR bits of the control field select: UNSEGMENTED (deliver as-is),
 * START (record sdu_len, allocate the reassembly buffer pi->sdu),
 * CONTINUE (append) or END (append, then deliver a clone of the
 * completed SDU).  A START while reassembly is already in progress, or
 * CONTINUE/END without one, is a protocol violation and drops the
 * partial SDU.  Always consumes the input skb.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* Unsegmented frame mid-reassembly: drop partial SDU. */
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;	/* skb now owned by the socket queue */

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			/* Nested START: drop the previous partial SDU. */
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			/* Peer sent more than it announced: abandon SDU. */
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len == pi->sdu_len) {
			/* NOTE(review): skb_clone() may return NULL under
			 * memory pressure and is passed to
			 * sock_queue_rcv_skb() unchecked — verify. */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);

			kfree_skb(pi->sdu);
			err = 0;
		}

		break;
	}

	kfree_skb(skb);
	return err;
}
3242 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3244 struct sk_buff *skb;
3245 u16 control = 0;
3247 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3248 if (bt_cb(skb)->tx_seq != tx_seq)
3249 break;
3251 skb = skb_dequeue(SREJ_QUEUE(sk));
3252 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3253 l2cap_sar_reassembly_sdu(sk, skb, control);
3254 l2cap_pi(sk)->buffer_seq_srej =
3255 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3256 tx_seq++;
/* The peer retransmitted a frame we had selectively rejected.
 *
 * Walk the pending-SREJ list: the entry matching tx_seq is satisfied
 * and removed; every entry passed over before it has its SREJ re-sent
 * and is rotated to the tail so the list keeps reflecting what is
 * still outstanding, in request order.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* This SREJ is now satisfied: drop it and stop. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask again and move entry to the tail. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3280 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3282 struct l2cap_pinfo *pi = l2cap_pi(sk);
3283 struct srej_list *new;
3284 u16 control;
3286 while (tx_seq != pi->expected_tx_seq) {
3287 control = L2CAP_SUPER_SELECT_REJECT;
3288 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3289 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3290 control |= L2CAP_CTRL_POLL;
3291 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3293 l2cap_send_sframe(pi, control);
3295 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3296 new->tx_seq = pi->expected_tx_seq++;
3297 list_add_tail(&new->list, SREJ_LIST(sk));
3299 pi->expected_tx_seq++;
/* ERTM receive path for I-frames (information frames).
 *
 * The piggybacked req_seq acknowledges our transmitted frames.  An
 * in-sequence frame is reassembled and periodically acknowledged; an
 * out-of-sequence frame starts (or feeds) selective-reject recovery:
 * it is parked on the sorted SREJ queue and SREJ S-frames are sent for
 * the missing sequence numbers.
 *
 * Returns 0, or a negative errno from SDU reassembly.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* Piggybacked acknowledgement for our outgoing frames. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest outstanding SREJ satisfied: park the frame
			 * and drain any run that became in-order. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete: leave SREJ state. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* Duplicate of an already-requested frame? */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			/* New gap beyond the known ones: request it. */
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	/* 6-bit sequence space: advance modulo 64. */
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* While recovering, in-order frames are parked too. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			/* F bit answers our poll: rewind and retransmit. */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK frames with an RR. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
/* ERTM receive path for S-frames (supervisory frames).
 *
 * Dispatches on the supervisory function: RR (receiver ready / ack),
 * REJ (go-back-N retransmission request), SREJ (selective
 * retransmission request) and RNR (receiver not ready / flow off).
 * Poll (P) bits are answered with Final (F) bits; F bits resolve an
 * outstanding WAIT_F poll and stop the monitor timer.
 *
 * Always returns 0; the skb is not consumed here (the caller owns it).
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);	/* req_seq of the S-frame */

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polls us: answer with RR + F bit. */
			u16 control = L2CAP_CTRL_FINAL;
			control |= L2CAP_SUPER_RCV_READY |
					(pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
			l2cap_send_sframe(l2cap_pi(sk), control);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Answer to our poll. */
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				/* Rewind transmit window and resend. */
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

			if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
				break;

			/* Poll answered: stop monitoring, rearm retransmit
			 * timer if frames are still unacknowledged. */
			pi->conn_state &= ~L2CAP_CONN_WAIT_F;
			del_timer(&pi->monitor_timer);

			if (pi->unacked_frames > 0)
				__mod_retrans_timer();
		} else {
			/* Plain acknowledgement. */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;

	case L2CAP_SUPER_REJECT:
		/* Go-back-N request: retransmit from req_seq onwards. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}

		break;

	case L2CAP_SUPER_SELECT_REJECT:
		/* Selective request: retransmit exactly frame tx_seq. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_retransmit_frame(sk, tx_seq);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Skip the retransmit if this F-bit SREJ repeats
			 * the one already acted upon. */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer flow-controlled us: stop retransmitting. */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&l2cap_pi(sk)->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
		}
		break;
	}

	return 0;
}
3516 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3518 struct sock *sk;
3519 struct l2cap_pinfo *pi;
3520 u16 control, len;
3521 u8 tx_seq;
3523 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3524 if (!sk) {
3525 BT_DBG("unknown cid 0x%4.4x", cid);
3526 goto drop;
3529 pi = l2cap_pi(sk);
3531 BT_DBG("sk %p, len %d", sk, skb->len);
3533 if (sk->sk_state != BT_CONNECTED)
3534 goto drop;
3536 switch (pi->mode) {
3537 case L2CAP_MODE_BASIC:
3538 /* If socket recv buffers overflows we drop data here
3539 * which is *bad* because L2CAP has to be reliable.
3540 * But we don't have any other choice. L2CAP doesn't
3541 * provide flow control mechanism. */
3543 if (pi->imtu < skb->len)
3544 goto drop;
3546 if (!sock_queue_rcv_skb(sk, skb))
3547 goto done;
3548 break;
3550 case L2CAP_MODE_ERTM:
3551 control = get_unaligned_le16(skb->data);
3552 skb_pull(skb, 2);
3553 len = skb->len;
3555 if (__is_sar_start(control))
3556 len -= 2;
3558 if (pi->fcs == L2CAP_FCS_CRC16)
3559 len -= 2;
3562 * We can just drop the corrupted I-frame here.
3563 * Receiver will miss it and start proper recovery
3564 * procedures and ask retransmission.
3566 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3567 goto drop;
3569 if (l2cap_check_fcs(pi, skb))
3570 goto drop;
3572 if (__is_iframe(control))
3573 l2cap_data_channel_iframe(sk, control, skb);
3574 else
3575 l2cap_data_channel_sframe(sk, control, skb);
3577 goto done;
3579 case L2CAP_MODE_STREAMING:
3580 control = get_unaligned_le16(skb->data);
3581 skb_pull(skb, 2);
3582 len = skb->len;
3584 if (__is_sar_start(control))
3585 len -= 2;
3587 if (pi->fcs == L2CAP_FCS_CRC16)
3588 len -= 2;
3590 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3591 goto drop;
3593 if (l2cap_check_fcs(pi, skb))
3594 goto drop;
3596 tx_seq = __get_txseq(control);
3598 if (pi->expected_tx_seq == tx_seq)
3599 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3600 else
3601 pi->expected_tx_seq = tx_seq + 1;
3603 l2cap_sar_reassembly_sdu(sk, skb, control);
3605 goto done;
3607 default:
3608 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3609 break;
3612 drop:
3613 kfree_skb(skb);
3615 done:
3616 if (sk)
3617 bh_unlock_sock(sk);
3619 return 0;
3622 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3624 struct sock *sk;
3626 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3627 if (!sk)
3628 goto drop;
3630 BT_DBG("sk %p, len %d", sk, skb->len);
3632 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3633 goto drop;
3635 if (l2cap_pi(sk)->imtu < skb->len)
3636 goto drop;
3638 if (!sock_queue_rcv_skb(sk, skb))
3639 goto done;
3641 drop:
3642 kfree_skb(skb);
3644 done:
3645 if (sk)
3646 bh_unlock_sock(sk);
3647 return 0;
3650 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3652 struct l2cap_hdr *lh = (void *) skb->data;
3653 u16 cid, len;
3654 __le16 psm;
3656 skb_pull(skb, L2CAP_HDR_SIZE);
3657 cid = __le16_to_cpu(lh->cid);
3658 len = __le16_to_cpu(lh->len);
3660 if (len != skb->len) {
3661 kfree_skb(skb);
3662 return;
3665 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3667 switch (cid) {
3668 case L2CAP_CID_SIGNALING:
3669 l2cap_sig_channel(conn, skb);
3670 break;
3672 case L2CAP_CID_CONN_LESS:
3673 psm = get_unaligned_le16(skb->data);
3674 skb_pull(skb, 2);
3675 l2cap_conless_channel(conn, psm, skb);
3676 break;
3678 default:
3679 l2cap_data_channel(conn, cid, skb);
3680 break;
3684 /* ---- L2CAP interface with lower layer (HCI) ---- */
3686 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3688 int exact = 0, lm1 = 0, lm2 = 0;
3689 register struct sock *sk;
3690 struct hlist_node *node;
3692 if (type != ACL_LINK)
3693 return 0;
3695 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3697 /* Find listening sockets and check their link_mode */
3698 read_lock(&l2cap_sk_list.lock);
3699 sk_for_each(sk, node, &l2cap_sk_list.head) {
3700 if (sk->sk_state != BT_LISTEN)
3701 continue;
3703 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3704 lm1 |= HCI_LM_ACCEPT;
3705 if (l2cap_pi(sk)->role_switch)
3706 lm1 |= HCI_LM_MASTER;
3707 exact++;
3708 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3709 lm2 |= HCI_LM_ACCEPT;
3710 if (l2cap_pi(sk)->role_switch)
3711 lm2 |= HCI_LM_MASTER;
3714 read_unlock(&l2cap_sk_list.lock);
3716 return exact ? lm1 : lm2;
3719 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3721 struct l2cap_conn *conn;
3723 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3725 if (hcon->type != ACL_LINK)
3726 return 0;
3728 if (!status) {
3729 conn = l2cap_conn_add(hcon, status);
3730 if (conn)
3731 l2cap_conn_ready(conn);
3732 } else
3733 l2cap_conn_del(hcon, bt_err(status));
3735 return 0;
3738 static int l2cap_disconn_ind(struct hci_conn *hcon)
3740 struct l2cap_conn *conn = hcon->l2cap_data;
3742 BT_DBG("hcon %p", hcon);
3744 if (hcon->type != ACL_LINK || !conn)
3745 return 0x13;
3747 return conn->disc_reason;
3750 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3752 BT_DBG("hcon %p reason %d", hcon, reason);
3754 if (hcon->type != ACL_LINK)
3755 return 0;
3757 l2cap_conn_del(hcon, bt_err(reason));
3759 return 0;
3762 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3764 if (sk->sk_type != SOCK_SEQPACKET)
3765 return;
3767 if (encrypt == 0x00) {
3768 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3769 l2cap_sock_clear_timer(sk);
3770 l2cap_sock_set_timer(sk, HZ * 5);
3771 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3772 __l2cap_sock_close(sk, ECONNREFUSED);
3773 } else {
3774 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3775 l2cap_sock_clear_timer(sk);
/* HCI reports completion of an authentication/encryption procedure.
 *
 * Walks every channel on the connection (each briefly bh-locked):
 * established channels get their encryption state rechecked; channels
 * in BT_CONNECT finally send their Connect Request (or are scheduled
 * for teardown on failure); channels in BT_CONNECT2 answer the peer's
 * pending Connect Request with success or a security block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on connect completion are
		 * handled by the connect path, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security OK: send the deferred request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Short timer: let the timeout path clean up. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* Receive ACL data from HCI and reassemble it into complete L2CAP PDUs.
 *
 * A start fragment (ACL_START) carries the L2CAP basic header and
 * announces the total PDU length; if the PDU is complete in one piece
 * it is processed immediately, otherwise a reassembly buffer
 * (conn->rx_skb / conn->rx_len) collects subsequent continuation
 * fragments until the PDU is whole.  Any inconsistency (unexpected
 * start/continuation, short or over-long frame) drops the fragment and
 * marks the connection unreliable.
 *
 * Always returns 0 and consumes the skb (fragment data is copied into
 * the reassembly buffer, so falling through to "drop" after a copy is
 * the normal path for fragments, not an error).
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* Previous PDU never finished: discard it. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			/* Not even a complete length field. */
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* Continuation without a pending start. */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Fragment data has been copied (or is invalid): free it. */
	kfree_skb(skb);
	return 0;
}
/* sysfs "l2cap" class attribute: dump one line per L2CAP socket with
 * its addresses, state, PSM, CIDs, MTUs and security level.
 *
 * NOTE(review): sprintf into the page buffer is not bounds-checked;
 * with enough sockets this can overrun the single page sysfs provides
 * — confirm, and consider a seq_file/debugfs conversion.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		/* src dst state psm scid dcid imtu omtu sec_level */
		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return str - buf;
}
3962 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.  L2CAP-specific
 * handlers where behaviour differs, shared bt_sock_* helpers for poll and
 * ioctl, and sock_no_* stubs for operations L2CAP does not support
 * (mmap, socketpair). */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
/* Registered with the Bluetooth socket layer for BTPROTO_L2CAP;
 * l2cap_sock_create() is invoked for every socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) call. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
/* Hooks registered with the HCI core: connection indication/confirmation,
 * disconnection, security events, and the ACL data path that feeds
 * incoming frames into L2CAP reassembly. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
/*
 * Module init: register the L2CAP components in dependency order --
 * the sock proto first, then the PF_BLUETOOTH socket family, then the
 * HCI protocol hooks that deliver ACL data.  A later failure unwinds
 * the earlier registrations; the sysfs info file is best-effort only.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		/* The error label only unregisters the proto, so undo the
		 * socket family registration here by hand. */
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* Failure to create the sysfs info file is logged but non-fatal. */
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
/*
 * Module exit: tear everything down in reverse order of l2cap_init().
 * The unregister helpers return status codes, but this late there is
 * nothing to do beyond logging a failure.
 */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/*
 * Intentionally empty hook: modules that only create L2CAP sockets (and
 * reference no other exported symbol from this module) call it so the
 * normal symbol-dependency mechanism loads l2cap automatically.
 */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable parameters, writable by root under
 * /sys/module/l2cap/parameters/ (mode 0644). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Auto-load this module when a PF_BLUETOOTH socket with protocol 0
 * (BTPROTO_L2CAP) is requested. */
MODULE_ALIAS("bt-proto-0");