Bluetooth: Completes the I-frame tx_seq check logic on RECV
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / bluetooth / l2cap.c
blob481cec22ef96f71aa1f62bd9d14f71e4452dad87
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
60 #else
61 static int enable_ertm = 0;
62 #endif
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
71 static struct bt_sock_list l2cap_sk_list = {
72 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
80 u8 code, u8 ident, u16 dlen, void *data);
82 /* ---- L2CAP timers ---- */
/* Socket timer expiry: tear the channel down with a state-derived reason. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
123 struct sock *s;
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
126 break;
128 return s;
131 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
133 struct sock *s;
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
136 break;
138 return s;
141 /* Find channel with given SCID.
142 * Returns locked socket */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 struct sock *s;
146 read_lock(&l->lock);
147 s = __l2cap_get_chan_by_scid(l, cid);
148 if (s)
149 bh_lock_sock(s);
150 read_unlock(&l->lock);
151 return s;
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
156 struct sock *s;
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
159 break;
161 return s;
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 struct sock *s;
167 read_lock(&l->lock);
168 s = __l2cap_get_chan_by_ident(l, ident);
169 if (s)
170 bh_lock_sock(s);
171 read_unlock(&l->lock);
172 return s;
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
181 return cid;
184 return 0;
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
189 sock_hold(sk);
191 if (l->head)
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
196 l->head = sk;
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
204 if (sk == l->head)
205 l->head = next;
207 if (next)
208 l2cap_pi(next)->prev_c = prev;
209 if (prev)
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
213 __sock_put(sk);
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 } else {
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
244 if (parent)
245 bt_accept_enqueue(parent, sk);
248 /* Delete channel.
249 * Must be called on the locked socket. */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
259 if (conn) {
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
269 if (err)
270 sk->sk_err = err;
272 if (parent) {
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
275 } else
276 sk->sk_state_change(sk);
279 /* Service level security */
280 static inline int l2cap_check_security(struct sock *sk)
282 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
283 __u8 auth_type;
285 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
287 auth_type = HCI_AT_NO_BONDING_MITM;
288 else
289 auth_type = HCI_AT_NO_BONDING;
291 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
292 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
293 } else {
294 switch (l2cap_pi(sk)->sec_level) {
295 case BT_SECURITY_HIGH:
296 auth_type = HCI_AT_GENERAL_BONDING_MITM;
297 break;
298 case BT_SECURITY_MEDIUM:
299 auth_type = HCI_AT_GENERAL_BONDING;
300 break;
301 default:
302 auth_type = HCI_AT_NO_BONDING;
303 break;
307 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
308 auth_type);
311 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
313 u8 id;
315 /* Get next available identificator.
316 * 1 - 128 are used by kernel.
317 * 129 - 199 are reserved.
318 * 200 - 254 are used by utilities like l2ping, etc.
321 spin_lock_bh(&conn->lock);
323 if (++conn->tx_ident > 128)
324 conn->tx_ident = 1;
326 id = conn->tx_ident;
328 spin_unlock_bh(&conn->lock);
330 return id;
333 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
335 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
337 BT_DBG("code 0x%2.2x", code);
339 if (!skb)
340 return;
342 hci_send_acl(conn->hcon, skb, 0);
345 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
347 struct sk_buff *skb;
348 struct l2cap_hdr *lh;
349 struct l2cap_conn *conn = pi->conn;
350 int count, hlen = L2CAP_HDR_SIZE + 2;
352 if (pi->fcs == L2CAP_FCS_CRC16)
353 hlen += 2;
355 BT_DBG("pi %p, control 0x%2.2x", pi, control);
357 count = min_t(unsigned int, conn->mtu, hlen);
358 control |= L2CAP_CTRL_FRAME_TYPE;
360 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
361 control |= L2CAP_CTRL_FINAL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
365 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
366 control |= L2CAP_CTRL_POLL;
367 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
370 skb = bt_skb_alloc(count, GFP_ATOMIC);
371 if (!skb)
372 return;
374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
375 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
376 lh->cid = cpu_to_le16(pi->dcid);
377 put_unaligned_le16(control, skb_put(skb, 2));
379 if (pi->fcs == L2CAP_FCS_CRC16) {
380 u16 fcs = crc16(0, (u8 *)lh, count - 2);
381 put_unaligned_le16(fcs, skb_put(skb, 2));
384 hci_send_acl(pi->conn->hcon, skb, 0);
387 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
389 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
390 control |= L2CAP_SUPER_RCV_NOT_READY;
391 else
392 control |= L2CAP_SUPER_RCV_READY;
394 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
396 l2cap_send_sframe(pi, control);
399 static void l2cap_do_start(struct sock *sk)
401 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
403 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
404 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
405 return;
407 if (l2cap_check_security(sk)) {
408 struct l2cap_conn_req req;
409 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
410 req.psm = l2cap_pi(sk)->psm;
412 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
414 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
415 L2CAP_CONN_REQ, sizeof(req), &req);
417 } else {
418 struct l2cap_info_req req;
419 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
421 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
422 conn->info_ident = l2cap_get_ident(conn);
424 mod_timer(&conn->info_timer, jiffies +
425 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
427 l2cap_send_cmd(conn, conn->info_ident,
428 L2CAP_INFO_REQ, sizeof(req), &req);
432 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
434 struct l2cap_disconn_req req;
436 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 l2cap_send_cmd(conn, l2cap_get_ident(conn),
439 L2CAP_DISCONN_REQ, sizeof(req), &req);
442 /* ---- L2CAP connections ---- */
443 static void l2cap_conn_start(struct l2cap_conn *conn)
445 struct l2cap_chan_list *l = &conn->chan_list;
446 struct sock *sk;
448 BT_DBG("conn %p", conn);
450 read_lock(&l->lock);
452 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
453 bh_lock_sock(sk);
455 if (sk->sk_type != SOCK_SEQPACKET &&
456 sk->sk_type != SOCK_STREAM) {
457 bh_unlock_sock(sk);
458 continue;
461 if (sk->sk_state == BT_CONNECT) {
462 if (l2cap_check_security(sk)) {
463 struct l2cap_conn_req req;
464 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
465 req.psm = l2cap_pi(sk)->psm;
467 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
469 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
470 L2CAP_CONN_REQ, sizeof(req), &req);
472 } else if (sk->sk_state == BT_CONNECT2) {
473 struct l2cap_conn_rsp rsp;
474 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
477 if (l2cap_check_security(sk)) {
478 if (bt_sk(sk)->defer_setup) {
479 struct sock *parent = bt_sk(sk)->parent;
480 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
481 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
482 parent->sk_data_ready(parent, 0);
484 } else {
485 sk->sk_state = BT_CONFIG;
486 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
487 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
489 } else {
490 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
491 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
494 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
495 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
498 bh_unlock_sock(sk);
501 read_unlock(&l->lock);
504 static void l2cap_conn_ready(struct l2cap_conn *conn)
506 struct l2cap_chan_list *l = &conn->chan_list;
507 struct sock *sk;
509 BT_DBG("conn %p", conn);
511 read_lock(&l->lock);
513 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
514 bh_lock_sock(sk);
516 if (sk->sk_type != SOCK_SEQPACKET &&
517 sk->sk_type != SOCK_STREAM) {
518 l2cap_sock_clear_timer(sk);
519 sk->sk_state = BT_CONNECTED;
520 sk->sk_state_change(sk);
521 } else if (sk->sk_state == BT_CONNECT)
522 l2cap_do_start(sk);
524 bh_unlock_sock(sk);
527 read_unlock(&l->lock);
530 /* Notify sockets that we cannot guaranty reliability anymore */
531 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
533 struct l2cap_chan_list *l = &conn->chan_list;
534 struct sock *sk;
536 BT_DBG("conn %p", conn);
538 read_lock(&l->lock);
540 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
541 if (l2cap_pi(sk)->force_reliable)
542 sk->sk_err = err;
545 read_unlock(&l->lock);
548 static void l2cap_info_timeout(unsigned long arg)
550 struct l2cap_conn *conn = (void *) arg;
552 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
553 conn->info_ident = 0;
555 l2cap_conn_start(conn);
558 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
560 struct l2cap_conn *conn = hcon->l2cap_data;
562 if (conn || status)
563 return conn;
565 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
566 if (!conn)
567 return NULL;
569 hcon->l2cap_data = conn;
570 conn->hcon = hcon;
572 BT_DBG("hcon %p conn %p", hcon, conn);
574 conn->mtu = hcon->hdev->acl_mtu;
575 conn->src = &hcon->hdev->bdaddr;
576 conn->dst = &hcon->dst;
578 conn->feat_mask = 0;
580 spin_lock_init(&conn->lock);
581 rwlock_init(&conn->chan_list.lock);
583 setup_timer(&conn->info_timer, l2cap_info_timeout,
584 (unsigned long) conn);
586 conn->disc_reason = 0x13;
588 return conn;
591 static void l2cap_conn_del(struct hci_conn *hcon, int err)
593 struct l2cap_conn *conn = hcon->l2cap_data;
594 struct sock *sk;
596 if (!conn)
597 return;
599 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
601 kfree_skb(conn->rx_skb);
603 /* Kill channels */
604 while ((sk = conn->chan_list.head)) {
605 bh_lock_sock(sk);
606 l2cap_chan_del(sk, err);
607 bh_unlock_sock(sk);
608 l2cap_sock_kill(sk);
611 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
612 del_timer_sync(&conn->info_timer);
614 hcon->l2cap_data = NULL;
615 kfree(conn);
618 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
620 struct l2cap_chan_list *l = &conn->chan_list;
621 write_lock_bh(&l->lock);
622 __l2cap_chan_add(conn, sk, parent);
623 write_unlock_bh(&l->lock);
626 /* ---- Socket interface ---- */
627 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
629 struct sock *sk;
630 struct hlist_node *node;
631 sk_for_each(sk, node, &l2cap_sk_list.head)
632 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
633 goto found;
634 sk = NULL;
635 found:
636 return sk;
639 /* Find socket with psm and source bdaddr.
640 * Returns closest match.
642 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
644 struct sock *sk = NULL, *sk1 = NULL;
645 struct hlist_node *node;
647 sk_for_each(sk, node, &l2cap_sk_list.head) {
648 if (state && sk->sk_state != state)
649 continue;
651 if (l2cap_pi(sk)->psm == psm) {
652 /* Exact match. */
653 if (!bacmp(&bt_sk(sk)->src, src))
654 break;
656 /* Closest match */
657 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
658 sk1 = sk;
661 return node ? sk : sk1;
664 /* Find socket with given address (psm, src).
665 * Returns locked socket */
666 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
668 struct sock *s;
669 read_lock(&l2cap_sk_list.lock);
670 s = __l2cap_get_sock_by_psm(state, psm, src);
671 if (s)
672 bh_lock_sock(s);
673 read_unlock(&l2cap_sk_list.lock);
674 return s;
677 static void l2cap_sock_destruct(struct sock *sk)
679 BT_DBG("sk %p", sk);
681 skb_queue_purge(&sk->sk_receive_queue);
682 skb_queue_purge(&sk->sk_write_queue);
685 static void l2cap_sock_cleanup_listen(struct sock *parent)
687 struct sock *sk;
689 BT_DBG("parent %p", parent);
691 /* Close not yet accepted channels */
692 while ((sk = bt_accept_dequeue(parent, NULL)))
693 l2cap_sock_close(sk);
695 parent->sk_state = BT_CLOSED;
696 sock_set_flag(parent, SOCK_ZAPPED);
699 /* Kill socket (only if zapped and orphan)
700 * Must be called on unlocked socket.
702 static void l2cap_sock_kill(struct sock *sk)
704 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
705 return;
707 BT_DBG("sk %p state %d", sk, sk->sk_state);
709 /* Kill poor orphan */
710 bt_sock_unlink(&l2cap_sk_list, sk);
711 sock_set_flag(sk, SOCK_DEAD);
712 sock_put(sk);
715 static void __l2cap_sock_close(struct sock *sk, int reason)
717 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
719 switch (sk->sk_state) {
720 case BT_LISTEN:
721 l2cap_sock_cleanup_listen(sk);
722 break;
724 case BT_CONNECTED:
725 case BT_CONFIG:
726 if (sk->sk_type == SOCK_SEQPACKET ||
727 sk->sk_type == SOCK_STREAM) {
728 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
730 sk->sk_state = BT_DISCONN;
731 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
732 l2cap_send_disconn_req(conn, sk);
733 } else
734 l2cap_chan_del(sk, reason);
735 break;
737 case BT_CONNECT2:
738 if (sk->sk_type == SOCK_SEQPACKET ||
739 sk->sk_type == SOCK_STREAM) {
740 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
741 struct l2cap_conn_rsp rsp;
742 __u16 result;
744 if (bt_sk(sk)->defer_setup)
745 result = L2CAP_CR_SEC_BLOCK;
746 else
747 result = L2CAP_CR_BAD_PSM;
749 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
750 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
751 rsp.result = cpu_to_le16(result);
752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
753 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
754 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
755 } else
756 l2cap_chan_del(sk, reason);
757 break;
759 case BT_CONNECT:
760 case BT_DISCONN:
761 l2cap_chan_del(sk, reason);
762 break;
764 default:
765 sock_set_flag(sk, SOCK_ZAPPED);
766 break;
770 /* Must be called on unlocked socket. */
771 static void l2cap_sock_close(struct sock *sk)
773 l2cap_sock_clear_timer(sk);
774 lock_sock(sk);
775 __l2cap_sock_close(sk, ECONNRESET);
776 release_sock(sk);
777 l2cap_sock_kill(sk);
780 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
782 struct l2cap_pinfo *pi = l2cap_pi(sk);
784 BT_DBG("sk %p", sk);
786 if (parent) {
787 sk->sk_type = parent->sk_type;
788 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
790 pi->imtu = l2cap_pi(parent)->imtu;
791 pi->omtu = l2cap_pi(parent)->omtu;
792 pi->mode = l2cap_pi(parent)->mode;
793 pi->fcs = l2cap_pi(parent)->fcs;
794 pi->max_tx = l2cap_pi(parent)->max_tx;
795 pi->tx_win = l2cap_pi(parent)->tx_win;
796 pi->sec_level = l2cap_pi(parent)->sec_level;
797 pi->role_switch = l2cap_pi(parent)->role_switch;
798 pi->force_reliable = l2cap_pi(parent)->force_reliable;
799 } else {
800 pi->imtu = L2CAP_DEFAULT_MTU;
801 pi->omtu = 0;
802 if (enable_ertm && sk->sk_type == SOCK_STREAM)
803 pi->mode = L2CAP_MODE_ERTM;
804 else
805 pi->mode = L2CAP_MODE_BASIC;
806 pi->max_tx = max_transmit;
807 pi->fcs = L2CAP_FCS_CRC16;
808 pi->tx_win = tx_window;
809 pi->sec_level = BT_SECURITY_LOW;
810 pi->role_switch = 0;
811 pi->force_reliable = 0;
814 /* Default config options */
815 pi->conf_len = 0;
816 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
817 skb_queue_head_init(TX_QUEUE(sk));
818 skb_queue_head_init(SREJ_QUEUE(sk));
819 INIT_LIST_HEAD(SREJ_LIST(sk));
822 static struct proto l2cap_proto = {
823 .name = "L2CAP",
824 .owner = THIS_MODULE,
825 .obj_size = sizeof(struct l2cap_pinfo)
828 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
830 struct sock *sk;
832 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
833 if (!sk)
834 return NULL;
836 sock_init_data(sock, sk);
837 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
839 sk->sk_destruct = l2cap_sock_destruct;
840 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
842 sock_reset_flag(sk, SOCK_ZAPPED);
844 sk->sk_protocol = proto;
845 sk->sk_state = BT_OPEN;
847 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
849 bt_sock_link(&l2cap_sk_list, sk);
850 return sk;
853 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
854 int kern)
856 struct sock *sk;
858 BT_DBG("sock %p", sock);
860 sock->state = SS_UNCONNECTED;
862 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
863 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
864 return -ESOCKTNOSUPPORT;
866 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
867 return -EPERM;
869 sock->ops = &l2cap_sock_ops;
871 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
872 if (!sk)
873 return -ENOMEM;
875 l2cap_sock_init(sk, NULL);
876 return 0;
879 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
881 struct sock *sk = sock->sk;
882 struct sockaddr_l2 la;
883 int len, err = 0;
885 BT_DBG("sk %p", sk);
887 if (!addr || addr->sa_family != AF_BLUETOOTH)
888 return -EINVAL;
890 memset(&la, 0, sizeof(la));
891 len = min_t(unsigned int, sizeof(la), alen);
892 memcpy(&la, addr, len);
894 if (la.l2_cid)
895 return -EINVAL;
897 lock_sock(sk);
899 if (sk->sk_state != BT_OPEN) {
900 err = -EBADFD;
901 goto done;
904 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
905 !capable(CAP_NET_BIND_SERVICE)) {
906 err = -EACCES;
907 goto done;
910 write_lock_bh(&l2cap_sk_list.lock);
912 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
913 err = -EADDRINUSE;
914 } else {
915 /* Save source address */
916 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
917 l2cap_pi(sk)->psm = la.l2_psm;
918 l2cap_pi(sk)->sport = la.l2_psm;
919 sk->sk_state = BT_BOUND;
921 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
922 __le16_to_cpu(la.l2_psm) == 0x0003)
923 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
926 write_unlock_bh(&l2cap_sk_list.lock);
928 done:
929 release_sock(sk);
930 return err;
933 static int l2cap_do_connect(struct sock *sk)
935 bdaddr_t *src = &bt_sk(sk)->src;
936 bdaddr_t *dst = &bt_sk(sk)->dst;
937 struct l2cap_conn *conn;
938 struct hci_conn *hcon;
939 struct hci_dev *hdev;
940 __u8 auth_type;
941 int err;
943 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
944 l2cap_pi(sk)->psm);
946 hdev = hci_get_route(dst, src);
947 if (!hdev)
948 return -EHOSTUNREACH;
950 hci_dev_lock_bh(hdev);
952 err = -ENOMEM;
954 if (sk->sk_type == SOCK_RAW) {
955 switch (l2cap_pi(sk)->sec_level) {
956 case BT_SECURITY_HIGH:
957 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
958 break;
959 case BT_SECURITY_MEDIUM:
960 auth_type = HCI_AT_DEDICATED_BONDING;
961 break;
962 default:
963 auth_type = HCI_AT_NO_BONDING;
964 break;
966 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
967 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
968 auth_type = HCI_AT_NO_BONDING_MITM;
969 else
970 auth_type = HCI_AT_NO_BONDING;
972 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
973 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
974 } else {
975 switch (l2cap_pi(sk)->sec_level) {
976 case BT_SECURITY_HIGH:
977 auth_type = HCI_AT_GENERAL_BONDING_MITM;
978 break;
979 case BT_SECURITY_MEDIUM:
980 auth_type = HCI_AT_GENERAL_BONDING;
981 break;
982 default:
983 auth_type = HCI_AT_NO_BONDING;
984 break;
988 hcon = hci_connect(hdev, ACL_LINK, dst,
989 l2cap_pi(sk)->sec_level, auth_type);
990 if (!hcon)
991 goto done;
993 conn = l2cap_conn_add(hcon, 0);
994 if (!conn) {
995 hci_conn_put(hcon);
996 goto done;
999 err = 0;
1001 /* Update source addr of the socket */
1002 bacpy(src, conn->src);
1004 l2cap_chan_add(conn, sk, NULL);
1006 sk->sk_state = BT_CONNECT;
1007 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1009 if (hcon->state == BT_CONNECTED) {
1010 if (sk->sk_type != SOCK_SEQPACKET &&
1011 sk->sk_type != SOCK_STREAM) {
1012 l2cap_sock_clear_timer(sk);
1013 sk->sk_state = BT_CONNECTED;
1014 } else
1015 l2cap_do_start(sk);
1018 done:
1019 hci_dev_unlock_bh(hdev);
1020 hci_dev_put(hdev);
1021 return err;
1024 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1026 struct sock *sk = sock->sk;
1027 struct sockaddr_l2 la;
1028 int len, err = 0;
1030 BT_DBG("sk %p", sk);
1032 if (!addr || alen < sizeof(addr->sa_family) ||
1033 addr->sa_family != AF_BLUETOOTH)
1034 return -EINVAL;
1036 memset(&la, 0, sizeof(la));
1037 len = min_t(unsigned int, sizeof(la), alen);
1038 memcpy(&la, addr, len);
1040 if (la.l2_cid)
1041 return -EINVAL;
1043 lock_sock(sk);
1045 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1046 && !la.l2_psm) {
1047 err = -EINVAL;
1048 goto done;
1051 switch (l2cap_pi(sk)->mode) {
1052 case L2CAP_MODE_BASIC:
1053 break;
1054 case L2CAP_MODE_ERTM:
1055 case L2CAP_MODE_STREAMING:
1056 if (enable_ertm)
1057 break;
1058 /* fall through */
1059 default:
1060 err = -ENOTSUPP;
1061 goto done;
1064 switch (sk->sk_state) {
1065 case BT_CONNECT:
1066 case BT_CONNECT2:
1067 case BT_CONFIG:
1068 /* Already connecting */
1069 goto wait;
1071 case BT_CONNECTED:
1072 /* Already connected */
1073 goto done;
1075 case BT_OPEN:
1076 case BT_BOUND:
1077 /* Can connect */
1078 break;
1080 default:
1081 err = -EBADFD;
1082 goto done;
1085 /* Set destination address and psm */
1086 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1087 l2cap_pi(sk)->psm = la.l2_psm;
1089 err = l2cap_do_connect(sk);
1090 if (err)
1091 goto done;
1093 wait:
1094 err = bt_sock_wait_state(sk, BT_CONNECTED,
1095 sock_sndtimeo(sk, flags & O_NONBLOCK));
1096 done:
1097 release_sock(sk);
1098 return err;
1101 static int l2cap_sock_listen(struct socket *sock, int backlog)
1103 struct sock *sk = sock->sk;
1104 int err = 0;
1106 BT_DBG("sk %p backlog %d", sk, backlog);
1108 lock_sock(sk);
1110 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1111 || sk->sk_state != BT_BOUND) {
1112 err = -EBADFD;
1113 goto done;
1116 switch (l2cap_pi(sk)->mode) {
1117 case L2CAP_MODE_BASIC:
1118 break;
1119 case L2CAP_MODE_ERTM:
1120 case L2CAP_MODE_STREAMING:
1121 if (enable_ertm)
1122 break;
1123 /* fall through */
1124 default:
1125 err = -ENOTSUPP;
1126 goto done;
1129 if (!l2cap_pi(sk)->psm) {
1130 bdaddr_t *src = &bt_sk(sk)->src;
1131 u16 psm;
1133 err = -EINVAL;
1135 write_lock_bh(&l2cap_sk_list.lock);
1137 for (psm = 0x1001; psm < 0x1100; psm += 2)
1138 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1139 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1140 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1141 err = 0;
1142 break;
1145 write_unlock_bh(&l2cap_sk_list.lock);
1147 if (err < 0)
1148 goto done;
1151 sk->sk_max_ack_backlog = backlog;
1152 sk->sk_ack_backlog = 0;
1153 sk->sk_state = BT_LISTEN;
1155 done:
1156 release_sock(sk);
1157 return err;
1160 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1162 DECLARE_WAITQUEUE(wait, current);
1163 struct sock *sk = sock->sk, *nsk;
1164 long timeo;
1165 int err = 0;
1167 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1169 if (sk->sk_state != BT_LISTEN) {
1170 err = -EBADFD;
1171 goto done;
1174 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1176 BT_DBG("sk %p timeo %ld", sk, timeo);
1178 /* Wait for an incoming connection. (wake-one). */
1179 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1180 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1181 set_current_state(TASK_INTERRUPTIBLE);
1182 if (!timeo) {
1183 err = -EAGAIN;
1184 break;
1187 release_sock(sk);
1188 timeo = schedule_timeout(timeo);
1189 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1191 if (sk->sk_state != BT_LISTEN) {
1192 err = -EBADFD;
1193 break;
1196 if (signal_pending(current)) {
1197 err = sock_intr_errno(timeo);
1198 break;
1201 set_current_state(TASK_RUNNING);
1202 remove_wait_queue(sk_sleep(sk), &wait);
1204 if (err)
1205 goto done;
1207 newsock->state = SS_CONNECTED;
1209 BT_DBG("new socket %p", nsk);
1211 done:
1212 release_sock(sk);
1213 return err;
1216 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1218 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1219 struct sock *sk = sock->sk;
1221 BT_DBG("sock %p, sk %p", sock, sk);
1223 addr->sa_family = AF_BLUETOOTH;
1224 *len = sizeof(struct sockaddr_l2);
1226 if (peer) {
1227 la->l2_psm = l2cap_pi(sk)->psm;
1228 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1229 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1230 } else {
1231 la->l2_psm = l2cap_pi(sk)->sport;
1232 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1233 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1236 return 0;
1239 static void l2cap_monitor_timeout(unsigned long arg)
1241 struct sock *sk = (void *) arg;
1243 bh_lock_sock(sk);
1244 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1245 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1246 bh_unlock_sock(sk);
1247 return;
1250 l2cap_pi(sk)->retry_count++;
1251 __mod_monitor_timer();
1253 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1254 bh_unlock_sock(sk);
1257 static void l2cap_retrans_timeout(unsigned long arg)
1259 struct sock *sk = (void *) arg;
1261 bh_lock_sock(sk);
1262 l2cap_pi(sk)->retry_count = 1;
1263 __mod_monitor_timer();
1265 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1267 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1268 bh_unlock_sock(sk);
1271 static void l2cap_drop_acked_frames(struct sock *sk)
1273 struct sk_buff *skb;
1275 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1276 l2cap_pi(sk)->unacked_frames) {
1277 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1278 break;
1280 skb = skb_dequeue(TX_QUEUE(sk));
1281 kfree_skb(skb);
1283 l2cap_pi(sk)->unacked_frames--;
1286 if (!l2cap_pi(sk)->unacked_frames)
1287 del_timer(&l2cap_pi(sk)->retrans_timer);
1289 return;
1292 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1294 struct l2cap_pinfo *pi = l2cap_pi(sk);
1296 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1298 hci_send_acl(pi->conn->hcon, skb, 0);
1301 static int l2cap_streaming_send(struct sock *sk)
1303 struct sk_buff *skb, *tx_skb;
1304 struct l2cap_pinfo *pi = l2cap_pi(sk);
1305 u16 control, fcs;
1307 while ((skb = sk->sk_send_head)) {
1308 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1311 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1312 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1314 if (pi->fcs == L2CAP_FCS_CRC16) {
1315 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1316 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1319 l2cap_do_send(sk, tx_skb);
1321 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1323 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1324 sk->sk_send_head = NULL;
1325 else
1326 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1328 skb = skb_dequeue(TX_QUEUE(sk));
1329 kfree_skb(skb);
1331 return 0;
1334 static void l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1336 struct l2cap_pinfo *pi = l2cap_pi(sk);
1337 struct sk_buff *skb, *tx_skb;
1338 u16 control, fcs;
1340 skb = skb_peek(TX_QUEUE(sk));
1341 if (!skb)
1342 return;
1344 do {
1345 if (bt_cb(skb)->tx_seq == tx_seq)
1346 break;
1348 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1349 return;
1351 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1353 if (pi->remote_max_tx &&
1354 bt_cb(skb)->retries == pi->remote_max_tx) {
1355 l2cap_send_disconn_req(pi->conn, sk);
1356 return;
1359 tx_skb = skb_clone(skb, GFP_ATOMIC);
1360 bt_cb(skb)->retries++;
1361 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1362 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1363 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1364 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1366 if (pi->fcs == L2CAP_FCS_CRC16) {
1367 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1368 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1371 l2cap_do_send(sk, tx_skb);
1374 static int l2cap_ertm_send(struct sock *sk)
1376 struct sk_buff *skb, *tx_skb;
1377 struct l2cap_pinfo *pi = l2cap_pi(sk);
1378 u16 control, fcs;
1379 int nsent = 0;
1381 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1382 return 0;
1384 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1385 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1387 if (pi->remote_max_tx &&
1388 bt_cb(skb)->retries == pi->remote_max_tx) {
1389 l2cap_send_disconn_req(pi->conn, sk);
1390 break;
1393 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++;
1397 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1398 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1399 control |= L2CAP_CTRL_FINAL;
1400 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1402 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1403 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1404 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1407 if (pi->fcs == L2CAP_FCS_CRC16) {
1408 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1409 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1412 l2cap_do_send(sk, tx_skb);
1414 __mod_retrans_timer();
1416 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1417 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1419 pi->unacked_frames++;
1420 pi->frames_sent++;
1422 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1423 sk->sk_send_head = NULL;
1424 else
1425 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1427 nsent++;
1430 return nsent;
1433 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1435 struct sock *sk = (struct sock *)pi;
1436 u16 control = 0;
1438 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1440 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1441 control |= L2CAP_SUPER_RCV_NOT_READY;
1442 l2cap_send_sframe(pi, control);
1443 return;
1444 } else if (l2cap_ertm_send(sk) == 0) {
1445 control |= L2CAP_SUPER_RCV_READY;
1446 l2cap_send_sframe(pi, control);
1450 static void l2cap_send_srejtail(struct sock *sk)
1452 struct srej_list *tail;
1453 u16 control;
1455 control = L2CAP_SUPER_SELECT_REJECT;
1456 control |= L2CAP_CTRL_FINAL;
1458 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1459 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1461 l2cap_send_sframe(l2cap_pi(sk), control);
1464 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1466 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1467 struct sk_buff **frag;
1468 int err, sent = 0;
1470 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1471 return -EFAULT;
1473 sent += count;
1474 len -= count;
1476 /* Continuation fragments (no L2CAP header) */
1477 frag = &skb_shinfo(skb)->frag_list;
1478 while (len) {
1479 count = min_t(unsigned int, conn->mtu, len);
1481 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1482 if (!*frag)
1483 return -EFAULT;
1484 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1485 return -EFAULT;
1487 sent += count;
1488 len -= count;
1490 frag = &(*frag)->next;
1493 return sent;
1496 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1498 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1499 struct sk_buff *skb;
1500 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1501 struct l2cap_hdr *lh;
1503 BT_DBG("sk %p len %d", sk, (int)len);
1505 count = min_t(unsigned int, (conn->mtu - hlen), len);
1506 skb = bt_skb_send_alloc(sk, count + hlen,
1507 msg->msg_flags & MSG_DONTWAIT, &err);
1508 if (!skb)
1509 return ERR_PTR(-ENOMEM);
1511 /* Create L2CAP header */
1512 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1513 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1514 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1515 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1517 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1518 if (unlikely(err < 0)) {
1519 kfree_skb(skb);
1520 return ERR_PTR(err);
1522 return skb;
1525 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1527 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1528 struct sk_buff *skb;
1529 int err, count, hlen = L2CAP_HDR_SIZE;
1530 struct l2cap_hdr *lh;
1532 BT_DBG("sk %p len %d", sk, (int)len);
1534 count = min_t(unsigned int, (conn->mtu - hlen), len);
1535 skb = bt_skb_send_alloc(sk, count + hlen,
1536 msg->msg_flags & MSG_DONTWAIT, &err);
1537 if (!skb)
1538 return ERR_PTR(-ENOMEM);
1540 /* Create L2CAP header */
1541 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1542 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1543 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1545 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1546 if (unlikely(err < 0)) {
1547 kfree_skb(skb);
1548 return ERR_PTR(err);
1550 return skb;
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 16-bit SDU length (SAR start frames only) and, when
 * CRC-16 is configured, a 2-byte FCS placeholder that the send path
 * fills in at transmit time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frame carries the total SDU length. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing FCS. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder FCS; the real CRC is computed when the frame is sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
/* Segment an SDU larger than remote_mps into a chain of I-frame PDUs
 * (SAR): one START frame carrying the total SDU length, CONTINUE frames,
 * and a final END frame.  The frames are built on a private queue and
 * only spliced onto the tx queue once the whole SDU segmented cleanly,
 * so a mid-way allocation failure leaves the tx queue untouched.
 *
 * Returns the number of payload bytes queued, or a negative errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start frame: full remote_mps payload plus the SDU length field. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
/* sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM builds a connectionless PDU, Basic mode builds a single
 * B-frame (bounded by omtu), and ERTM/Streaming either queue a single
 * unsegmented I-frame or SAR-segment the SDU, then kick the respective
 * transmit engine.  Returns bytes accepted or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EINVAL;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);
			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;
		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING)
			err = l2cap_streaming_send(sk);
		else
			err = l2cap_ertm_send(sk);

		/* A non-negative send result means the SDU was queued/sent. */
		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EINVAL;
	}

done:
	release_sock(sk);
	return err;
}
/* recvmsg() entry point.  For a deferred-setup channel still in
 * BT_CONNECT2, the first read acts as the acceptance: the pending
 * connect response is sent and 0 is returned.  Otherwise reception is
 * delegated to the common Bluetooth socket receive path.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Current values pre-fill @opts so a short user buffer only overrides
 * the leading fields.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): even when the mode check above set -EINVAL,
		 * the remaining options (and the invalid mode itself) are
		 * still applied below — confirm this is intentional. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested level wins (SECURE > ENCRYPT > AUTH). */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default if the user buffer is shorter than the struct. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket is connected. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Map internal security level back onto the legacy LM bits. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		/* NOTE(review): cinfo is a stack struct copied to user space
		 * without a memset — any padding bytes would leak kernel
		 * stack contents; consider zeroing it first. */
		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* getsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket is connected. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/* shutdown() entry point.  Idempotent: closes the channel on the first
 * call and, if SO_LINGER is set, waits up to sk_lingertime for the
 * socket to reach BT_CLOSED.  The @how argument is ignored — shutdown
 * is always bidirectional here.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
2067 static int l2cap_sock_release(struct socket *sock)
2069 struct sock *sk = sock->sk;
2070 int err;
2072 BT_DBG("sock %p, sk %p", sock, sk);
2074 if (!sk)
2075 return 0;
2077 err = l2cap_sock_shutdown(sock, 2);
2079 sock_orphan(sk);
2080 l2cap_sock_kill(sk);
2081 return err;
/* Channel configuration finished: clear transient config state and wake
 * whoever is waiting — the connect()er for an outgoing channel, or the
 * listening parent's accept() for an incoming one.
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
2107 /* Copy frame to all raw sockets on that connection */
2108 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2110 struct l2cap_chan_list *l = &conn->chan_list;
2111 struct sk_buff *nskb;
2112 struct sock *sk;
2114 BT_DBG("conn %p", conn);
2116 read_lock(&l->lock);
2117 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2118 if (sk->sk_type != SOCK_RAW)
2119 continue;
2121 /* Don't send frame to the socket it came from */
2122 if (skb->sk == sk)
2123 continue;
2124 nskb = skb_clone(skb, GFP_ATOMIC);
2125 if (!nskb)
2126 continue;
2128 if (sock_queue_rcv_skb(sk, nskb))
2129 kfree_skb(nskb);
2131 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command PDU: L2CAP header (CID 0x0001),
 * command header, then @dlen bytes of @data.  Payload beyond the first
 * buffer (bounded by conn->mtu) is chained as frag_list fragments.
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first buffer with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
/* Decode one configuration option at *@ptr, advancing *@ptr past it.
 * Fixed-size (1/2/4 byte) option values are returned by value in *@val
 * (converted from little endian); other sizes return a pointer to the
 * raw bytes in *@val.  Returns the total encoded size consumed.
 *
 * NOTE(review): opt->len is taken from the peer without being checked
 * against the remaining buffer length — callers only bound the loop by
 * L2CAP_CONF_OPT_SIZE, so a crafted length can walk past the request
 * data; confirm the caller-side validation.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Variable-size option: hand back a pointer to the bytes. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option to the buffer at *@ptr and advance
 * *@ptr past it.  For 1/2/4-byte options @val holds the value itself
 * (stored little endian); for any other length @val is interpreted as a
 * pointer to @len raw bytes.  The caller guarantees buffer space.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* Variable-size option: val is a pointer to the bytes. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Ack timer expiry (ERTM): received I-frames have gone unacknowledged
 * long enough — send an explicit acknowledgement now.  Runs in timer
 * (softirq) context, hence bh_lock_sock().
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2266 static inline void l2cap_ertm_init(struct sock *sk)
2268 l2cap_pi(sk)->expected_ack_seq = 0;
2269 l2cap_pi(sk)->unacked_frames = 0;
2270 l2cap_pi(sk)->buffer_seq = 0;
2271 l2cap_pi(sk)->num_acked = 0;
2272 l2cap_pi(sk)->frames_sent = 0;
2274 setup_timer(&l2cap_pi(sk)->retrans_timer,
2275 l2cap_retrans_timeout, (unsigned long) sk);
2276 setup_timer(&l2cap_pi(sk)->monitor_timer,
2277 l2cap_monitor_timeout, (unsigned long) sk);
2278 setup_timer(&l2cap_pi(sk)->ack_timer,
2279 l2cap_ack_timeout, (unsigned long) sk);
2281 __skb_queue_head_init(SREJ_QUEUE(sk));
2284 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2286 u32 local_feat_mask = l2cap_feat_mask;
2287 if (enable_ertm)
2288 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2290 switch (mode) {
2291 case L2CAP_MODE_ERTM:
2292 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2293 case L2CAP_MODE_STREAMING:
2294 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2295 default:
2296 return 0x00;
2300 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2302 switch (mode) {
2303 case L2CAP_MODE_STREAMING:
2304 case L2CAP_MODE_ERTM:
2305 if (l2cap_mode_supported(mode, remote_feat_mask))
2306 return mode;
2307 /* fall through */
2308 default:
2309 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into @data: MTU option for
 * Basic mode, or an RFC option (plus optional FCS option) for
 * ERTM/Streaming.  On the first request the effective mode is settled
 * against the connection's feature mask.  Returns the request length.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only on the very first config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		/* Cap the PDU size so a frame plus overhead fits the ACL MTU. */
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
/* Parse the peer's configuration request (stashed in pi->conf_req) and
 * build our response into @data.  Unknown non-hint options are listed
 * back with result L2CAP_CONF_UNKNOWN; a mode mismatch yields
 * L2CAP_CONF_UNACCEPT or -ECONNREFUSED.  Returns the response length
 * or a negative errno.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: collect the options the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back to the peer. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Second attempt with a mismatched mode: give up. */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			/* NOTE(review): the le16_to_cpu() calls below are
			 * applied to host-order values (conn->mtu arithmetic
			 * and timeout constants) being stored into __le16
			 * fields — these look like they should be
			 * cpu_to_le16(); harmless on little-endian only. */
			if (rfc.max_pdu_size > pi->conn->mtu - 10)
				rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (rfc.max_pdu_size > pi->conn->mtu - 10)
				rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
/* Parse the peer's configuration response in @rsp and build the follow-up
 * request into @data, adopting the values the peer proposed (MTU, flush
 * timeout, RFC parameters).  Returns the new request length or
 * -ECONNREFUSED if the peer tries to change a locked-in mode.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* NOTE(review): rfc is uninitialized; if the response carries no
	 * RFC option the switch below reads stack garbage — confirm every
	 * peer response is guaranteed to include L2CAP_CONF_RFC. */
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->omtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->omtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* Once STATE2_DEVICE locked our mode, refuse changes. */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->mode = rfc.mode;
			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2617 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2619 struct l2cap_conf_rsp *rsp = data;
2620 void *ptr = rsp->data;
2622 BT_DBG("sk %p", sk);
2624 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2625 rsp->result = cpu_to_le16(result);
2626 rsp->flags = cpu_to_le16(flags);
2628 return ptr - data;
/* Extract the RFC option from a configuration response and adopt its
 * ERTM/Streaming parameters.  No-op for Basic-mode channels.
 */
static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int type, olen;
	unsigned long val;
	/* NOTE(review): rfc is uninitialized; if the response carries no
	 * RFC option, the switch after "done" reads stack garbage —
	 * confirm an RFC option is always present in this path. */
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);

	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		pi->remote_tx_win = rfc.txwin_size;
		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
/* Handle an incoming Command Reject.  The only case acted upon is a
 * reject of our pending Information Request: mark the feature-mask
 * exchange done so channel setup can continue without it.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	/* 0x0000 = "command not understood"; ignore any other reason. */
	if (rej->reason != 0x0000)
		return 0;

	/* Only react if the reject matches our outstanding info request. */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		/* Kick any channels that were waiting on the info exchange. */
		l2cap_conn_start(conn);
	}

	return 0;
}
/* Handle an incoming Connection Request: locate a listening socket for
 * the PSM, run security checks, allocate and register a child channel,
 * and send a Connection Response (success, pending or a failure code).
 *
 * NOTE(review): 'response' unlocks @parent without an explicit lock in
 * this function -- presumably l2cap_get_sock_by_psm() returns with the
 * socket bh-locked; verify against its definition.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05 = authentication failure disconnect reason. */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Hold the ACL link for the lifetime of the new channel. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* Our local CID, assigned by __l2cap_chan_add(), goes back as dcid. */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept() before we answer. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange still in flight; answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* A "pending / no info" reply means we still owe the peer a
	 * feature-mask query: start it now. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
/* Handle a Connection Response to our earlier Connection Request:
 * on success move the channel to BT_CONFIG and start configuration,
 * on "pending" just flag it, otherwise tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* scid == 0 can happen for a pending/failed response; fall back
	 * to matching the channel by the command identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Kick off configuration immediately. */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
/* Handle a Configuration Request.  Fragments (flag bit 0 set) are
 * accumulated in pi->conf_req; once complete, the options are parsed,
 * a response is sent, and -- when both directions are configured --
 * the channel is promoted to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Ignore config traffic on a channel already being torn down. */
	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable option mismatch: drop the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Default to CRC16 FCS unless both sides agreed to none. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but have not yet sent our own request. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2941 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2943 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2944 u16 scid, flags, result;
2945 struct sock *sk;
2946 int len = cmd->len - sizeof(*rsp);
2948 scid = __le16_to_cpu(rsp->scid);
2949 flags = __le16_to_cpu(rsp->flags);
2950 result = __le16_to_cpu(rsp->result);
2952 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2953 scid, flags, result);
2955 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2956 if (!sk)
2957 return 0;
2959 switch (result) {
2960 case L2CAP_CONF_SUCCESS:
2961 l2cap_conf_rfc_get(sk, rsp->data, len);
2962 break;
2964 case L2CAP_CONF_UNACCEPT:
2965 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2966 char req[64];
2968 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2969 l2cap_send_disconn_req(conn, sk);
2970 goto done;
2973 /* throw out any old stored conf requests */
2974 result = L2CAP_CONF_SUCCESS;
2975 len = l2cap_parse_conf_rsp(sk, rsp->data,
2976 len, req, &result);
2977 if (len < 0) {
2978 l2cap_send_disconn_req(conn, sk);
2979 goto done;
2982 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2983 L2CAP_CONF_REQ, len, req);
2984 l2cap_pi(sk)->num_conf_req++;
2985 if (result != L2CAP_CONF_SUCCESS)
2986 goto done;
2987 break;
2990 default:
2991 sk->sk_state = BT_DISCONN;
2992 sk->sk_err = ECONNRESET;
2993 l2cap_sock_set_timer(sk, HZ * 5);
2994 l2cap_send_disconn_req(conn, sk);
2995 goto done;
2998 if (flags & 0x01)
2999 goto done;
3001 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3003 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3004 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3005 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3006 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3008 sk->sk_state = BT_CONNECTED;
3009 l2cap_pi(sk)->next_tx_seq = 0;
3010 l2cap_pi(sk)->expected_tx_seq = 0;
3011 __skb_queue_head_init(TX_QUEUE(sk));
3012 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3013 l2cap_ertm_init(sk);
3015 l2cap_chan_ready(sk);
3018 done:
3019 bh_unlock_sock(sk);
3020 return 0;
/* Handle a Disconnection Request: acknowledge it, flush any pending
 * transmit/SREJ state and timers, then remove and kill the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra retransmission state that must be stopped. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle a Disconnection Response to our own request: flush pending
 * transmit/SREJ state and timers, then remove and kill the channel.
 * Mirrors l2cap_disconnect_req() but with no error on the socket.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra retransmission state that must be stopped. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	/* 0 = clean disconnect, no error reported to the socket. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries, and report "not supported" for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 2-byte type + 2-byte result + 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/Streaming/FCS only when enabled by the
		 * module parameter. */
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 2-byte type + 2-byte result + 8-byte channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
/* Handle an Information Response.  After the feature mask arrives we
 * either chase it with a fixed-channel query (if supported) or mark
 * the exchange done and start any channels waiting on it.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query them before
			 * declaring the info exchange complete. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
/* Process the L2CAP signalling channel: iterate over all commands in
 * the skb, dispatch each to its handler, and send a Command Reject for
 * any handler error.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length running past the buffer or a zero ident means
		 * the packet is malformed; stop parsing entirely. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
/* Verify (and strip) the trailing 2-byte FCS of an ERTM/Streaming
 * frame.  Returns 0 if the FCS matches or FCS is disabled, -EINVAL on
 * mismatch.  On return the FCS bytes are trimmed from @skb.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi,  struct sk_buff *skb)
{
	/* CRC covers the L2CAP header (which sits just before skb->data
	 * at this point) plus the 2-byte control field and payload. */
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Trim first: the FCS bytes stay in the buffer, so
		 * skb->data + skb->len now points at the received FCS. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EINVAL;
	}
	return 0;
}
/* Answer a poll (P-bit) from the peer with the F-bit set: send RNR if
 * we are locally busy, otherwise flush pending I-frames, and fall back
 * to a plain RR if nothing at all was transmitted.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;
	/* The response to a poll must carry the Final bit. */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		/* F-bit already delivered via the RNR above. */
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	/* I-frames sent here carry the F-bit via SEND_FBIT. */
	l2cap_ertm_send(sk);

	/* Nothing was sent and we are not busy: acknowledge with RR. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq.  Returns -EINVAL if a frame with the same
 * tx_seq is already queued (duplicate).
 *
 * NOTE(review): the '>' comparison is a plain integer compare and does
 * not account for modulo-64 wraparound of tx_seq -- verify ordering is
 * still correct across a sequence-number wrap.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;

	/* Stash sequencing info in the skb control block for later
	 * in-order delivery by l2cap_check_srej_gap(). */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
/* Reassemble an ERTM SDU from SAR-segmented I-frames and deliver it to
 * the socket.  Any SAR protocol violation disconnects the channel
 * (data loss is not tolerated in ERTM mode).  Consumes @skb on every
 * path except a failed sock_queue_rcv_skb() of an unsegmented frame.
 *
 * NOTE(review): the skb_clone() result in the SDU_END case is passed
 * to sock_queue_rcv_skb() without a NULL check -- confirm clone
 * failure is handled by the callee or fix upstream.
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = 0;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame in the middle of a SAR sequence
		 * is a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		/* NOTE(review): the length check happens after the copy;
		 * skb_put() would panic if the allocation is exceeded --
		 * verify segment sizes are bounded earlier (mps check). */
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)
			goto drop;

		/* The reassembled size must match the announced length. */
		if (pi->partial_sdu_len != pi->sdu_len)
			goto drop;

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0)
			kfree_skb(_skb);

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return err;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* Fall through: in ERTM mode any reassembly failure is fatal
	 * for the channel. */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk);
	kfree_skb(skb);
	return 0;
}
/* Reassemble a Streaming-mode SDU from SAR-segmented frames.  Unlike
 * ERTM, Streaming mode tolerates loss: malformed sequences are simply
 * discarded and reassembly state reset.  Consumes @skb on all paths
 * except a successful queue of an unsegmented frame.
 *
 * NOTE(review): as in the ERTM variant, skb_clone() is not checked for
 * NULL and the CONTINUE/END memcpy happens before the length check --
 * verify segment sizes are bounded earlier.
 */
static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 * Streaming Mode.
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Abandon any half-built SDU; this frame supersedes it. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)
			goto drop;

		/* Deliver only if the size matches; a short SDU is
		 * silently discarded (streaming tolerates loss). */
		if (pi->partial_sdu_len == pi->sdu_len) {
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		err = 0;

drop:
		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return err;
}
/* After a SREJ'd frame arrives, deliver the now-contiguous run of
 * buffered frames from the SREJ queue, starting at @tx_seq, advancing
 * buffer_seq_srej for each frame handed to reassembly.
 *
 * NOTE(review): tx_seq++ wraps at 256, not at the protocol's modulo
 * 64 -- presumably queued sequence numbers never straddle the wrap
 * here; verify.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Stop at the first gap in the sequence. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq++;
	}
}
/* The peer retransmitted a frame we had SREJ'd.  Walk the SREJ list:
 * entries before @tx_seq get their SREJ re-sent and are rotated to the
 * tail; the entry matching @tx_seq is satisfied and removed.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* This outstanding SREJ is now satisfied. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		/* Still missing: ask for it again and move the entry to
		 * the end of the list. */
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
/* A gap was detected: send one SREJ per missing sequence number from
 * expected_tx_seq up to (but excluding) @tx_seq, recording each in the
 * SREJ list, then step expected_tx_seq past the received frame.
 *
 * NOTE(review): the kzalloc(GFP_ATOMIC) result is dereferenced without
 * a NULL check -- an allocation failure here would oops; needs a
 * proper error path.
 * NOTE(review): expected_tx_seq is incremented without the modulo-64
 * wrap applied elsewhere -- verify wraparound behaviour.
 */
static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *new;
	u16 control;

	while (tx_seq != pi->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = pi->expected_tx_seq++;
		list_add_tail(&new->list, SREJ_LIST(sk));
	}
	/* Skip over the frame that triggered the SREJ burst. */
	pi->expected_tx_seq++;
}
3581 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3583 struct l2cap_pinfo *pi = l2cap_pi(sk);
3584 u8 tx_seq = __get_txseq(rx_control);
3585 u8 req_seq = __get_reqseq(rx_control);
3586 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3587 u8 tx_seq_offset, expected_tx_seq_offset;
3588 int num_to_ack = (pi->tx_win/6) + 1;
3589 int err = 0;
3591 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3593 if (L2CAP_CTRL_FINAL & rx_control &&
3594 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3595 del_timer(&pi->monitor_timer);
3596 if (pi->unacked_frames > 0)
3597 __mod_retrans_timer();
3598 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3601 pi->expected_ack_seq = req_seq;
3602 l2cap_drop_acked_frames(sk);
3604 if (tx_seq == pi->expected_tx_seq)
3605 goto expected;
3607 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3608 if (tx_seq_offset < 0)
3609 tx_seq_offset += 64;
3611 /* invalid tx_seq */
3612 if (tx_seq_offset >= pi->tx_win) {
3613 l2cap_send_disconn_req(pi->conn, sk);
3614 goto drop;
3617 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3618 struct srej_list *first;
3620 first = list_first_entry(SREJ_LIST(sk),
3621 struct srej_list, list);
3622 if (tx_seq == first->tx_seq) {
3623 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3624 l2cap_check_srej_gap(sk, tx_seq);
3626 list_del(&first->list);
3627 kfree(first);
3629 if (list_empty(SREJ_LIST(sk))) {
3630 pi->buffer_seq = pi->buffer_seq_srej;
3631 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3632 l2cap_send_ack(pi);
3634 } else {
3635 struct srej_list *l;
3637 /* duplicated tx_seq */
3638 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3639 goto drop;
3641 list_for_each_entry(l, SREJ_LIST(sk), list) {
3642 if (l->tx_seq == tx_seq) {
3643 l2cap_resend_srejframe(sk, tx_seq);
3644 return 0;
3647 l2cap_send_srejframe(sk, tx_seq);
3649 } else {
3650 expected_tx_seq_offset =
3651 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3652 if (expected_tx_seq_offset < 0)
3653 expected_tx_seq_offset += 64;
3655 /* duplicated tx_seq */
3656 if (tx_seq_offset < expected_tx_seq_offset)
3657 goto drop;
3659 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3661 INIT_LIST_HEAD(SREJ_LIST(sk));
3662 pi->buffer_seq_srej = pi->buffer_seq;
3664 __skb_queue_head_init(SREJ_QUEUE(sk));
3665 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3667 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3669 l2cap_send_srejframe(sk, tx_seq);
3671 return 0;
3673 expected:
3674 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3676 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3677 bt_cb(skb)->tx_seq = tx_seq;
3678 bt_cb(skb)->sar = sar;
3679 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3680 return 0;
3683 if (rx_control & L2CAP_CTRL_FINAL) {
3684 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3685 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3686 else {
3687 if (!skb_queue_empty(TX_QUEUE(sk)))
3688 sk->sk_send_head = TX_QUEUE(sk)->next;
3689 pi->next_tx_seq = pi->expected_ack_seq;
3690 l2cap_ertm_send(sk);
3694 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3696 err = l2cap_ertm_reassembly_sdu(sk, skb, rx_control);
3697 if (err < 0)
3698 return err;
3700 __mod_ack_timer();
3702 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3703 if (pi->num_acked == num_to_ack - 1)
3704 l2cap_send_ack(pi);
3706 return 0;
3708 drop:
3709 kfree_skb(skb);
3710 return 0;
/* Handle a Receiver Ready S-frame: process the acknowledgement, answer
 * a poll (P-bit), react to a final (F-bit) by retransmitting if no REJ
 * exception is active, or simply resume transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	/* RR acknowledges everything up to req_seq. */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* While in SREJ recovery, answer the poll with
			 * the tail SREJ instead of RR/I-frames. */
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* If this F-bit answers a REJ we sent, the exception is
		 * over; otherwise retransmit from the last ack. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			if (!skb_queue_empty(TX_QUEUE(sk)))
				sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
/* Handle a Reject S-frame: the peer asks for retransmission starting
 * at req_seq.  Drop acknowledged frames and rewind the transmit state
 * accordingly; track the REJ exception via REJ_ACT while waiting for
 * an F-bit.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames before req_seq are implicitly acknowledged. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* An F-bit REJ that answers our own REJ needs no second
		 * retransmission round. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			if (!skb_queue_empty(TX_QUEUE(sk)))
				sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	} else {
		/* Rewind and retransmit from the rejected frame. */
		if (!skb_queue_empty(TX_QUEUE(sk)))
			sk->sk_send_head = TX_QUEUE(sk)->next;
		pi->next_tx_seq = pi->expected_ack_seq;
		l2cap_ertm_send(sk);

		/* If we are also waiting for an F-bit, remember this REJ
		 * so its eventual F-bit doesn't trigger a re-send. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject S-frame: retransmit the single frame the
 * peer asks for, tracking the SREJ exception (SREJ_ACT plus the saved
 * req_seq) across a poll/final handshake to avoid double
 * retransmission.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);
		l2cap_retransmit_frame(sk, tx_seq);
		l2cap_ertm_send(sk);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit for a frame we already retransmitted under this
		 * SREJ exception: nothing more to send. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
/* Handle a Receiver Not Ready S-frame: mark the peer busy, take its
 * acknowledgement, stop retransmitting, and answer any poll.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer is busy: no point retransmitting until released. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: answer a poll with the tail SREJ, otherwise
	 * keep the peer informed that we can still receive. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
/* Dispatch a received ERTM S-frame (RR/REJ/SREJ/RNR) to its handler
 * after processing a possible F-bit answer to our poll.  Always
 * consumes @skb (S-frames carry no payload to deliver).
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* F-bit answering our poll: stop the monitor timer and resume
	 * normal retransmission supervision. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3874 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3876 struct sock *sk;
3877 struct l2cap_pinfo *pi;
3878 u16 control, len;
3879 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
3881 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3882 if (!sk) {
3883 BT_DBG("unknown cid 0x%4.4x", cid);
3884 goto drop;
3887 pi = l2cap_pi(sk);
3889 BT_DBG("sk %p, len %d", sk, skb->len);
3891 if (sk->sk_state != BT_CONNECTED)
3892 goto drop;
3894 switch (pi->mode) {
3895 case L2CAP_MODE_BASIC:
3896 /* If socket recv buffers overflows we drop data here
3897 * which is *bad* because L2CAP has to be reliable.
3898 * But we don't have any other choice. L2CAP doesn't
3899 * provide flow control mechanism. */
3901 if (pi->imtu < skb->len)
3902 goto drop;
3904 if (!sock_queue_rcv_skb(sk, skb))
3905 goto done;
3906 break;
3908 case L2CAP_MODE_ERTM:
3909 control = get_unaligned_le16(skb->data);
3910 skb_pull(skb, 2);
3911 len = skb->len;
3913 if (__is_sar_start(control))
3914 len -= 2;
3916 if (pi->fcs == L2CAP_FCS_CRC16)
3917 len -= 2;
3920 * We can just drop the corrupted I-frame here.
3921 * Receiver will miss it and start proper recovery
3922 * procedures and ask retransmission.
3924 if (len > pi->mps) {
3925 l2cap_send_disconn_req(pi->conn, sk);
3926 goto drop;
3929 if (l2cap_check_fcs(pi, skb))
3930 goto drop;
3932 req_seq = __get_reqseq(control);
3933 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3934 if (req_seq_offset < 0)
3935 req_seq_offset += 64;
3937 next_tx_seq_offset =
3938 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3939 if (next_tx_seq_offset < 0)
3940 next_tx_seq_offset += 64;
3942 /* check for invalid req-seq */
3943 if (req_seq_offset > next_tx_seq_offset) {
3944 l2cap_send_disconn_req(pi->conn, sk);
3945 goto drop;
3948 if (__is_iframe(control)) {
3949 if (len < 4) {
3950 l2cap_send_disconn_req(pi->conn, sk);
3951 goto drop;
3954 l2cap_data_channel_iframe(sk, control, skb);
3955 } else {
3956 if (len != 0) {
3957 l2cap_send_disconn_req(pi->conn, sk);
3958 goto drop;
3961 l2cap_data_channel_sframe(sk, control, skb);
3964 goto done;
3966 case L2CAP_MODE_STREAMING:
3967 control = get_unaligned_le16(skb->data);
3968 skb_pull(skb, 2);
3969 len = skb->len;
3971 if (__is_sar_start(control))
3972 len -= 2;
3974 if (pi->fcs == L2CAP_FCS_CRC16)
3975 len -= 2;
3977 if (len > pi->mps || len < 4 || __is_sframe(control))
3978 goto drop;
3980 if (l2cap_check_fcs(pi, skb))
3981 goto drop;
3983 tx_seq = __get_txseq(control);
3985 if (pi->expected_tx_seq == tx_seq)
3986 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3987 else
3988 pi->expected_tx_seq = (tx_seq + 1) % 64;
3990 l2cap_streaming_reassembly_sdu(sk, skb, control);
3992 goto done;
3994 default:
3995 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3996 break;
3999 drop:
4000 kfree_skb(skb);
4002 done:
4003 if (sk)
4004 bh_unlock_sock(sk);
4006 return 0;
4009 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4011 struct sock *sk;
4013 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4014 if (!sk)
4015 goto drop;
4017 BT_DBG("sk %p, len %d", sk, skb->len);
4019 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4020 goto drop;
4022 if (l2cap_pi(sk)->imtu < skb->len)
4023 goto drop;
4025 if (!sock_queue_rcv_skb(sk, skb))
4026 goto done;
4028 drop:
4029 kfree_skb(skb);
4031 done:
4032 if (sk)
4033 bh_unlock_sock(sk);
4034 return 0;
4037 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4039 struct l2cap_hdr *lh = (void *) skb->data;
4040 u16 cid, len;
4041 __le16 psm;
4043 skb_pull(skb, L2CAP_HDR_SIZE);
4044 cid = __le16_to_cpu(lh->cid);
4045 len = __le16_to_cpu(lh->len);
4047 if (len != skb->len) {
4048 kfree_skb(skb);
4049 return;
4052 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4054 switch (cid) {
4055 case L2CAP_CID_SIGNALING:
4056 l2cap_sig_channel(conn, skb);
4057 break;
4059 case L2CAP_CID_CONN_LESS:
4060 psm = get_unaligned_le16(skb->data);
4061 skb_pull(skb, 2);
4062 l2cap_conless_channel(conn, psm, skb);
4063 break;
4065 default:
4066 l2cap_data_channel(conn, cid, skb);
4067 break;
4071 /* ---- L2CAP interface with lower layer (HCI) ---- */
4073 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4075 int exact = 0, lm1 = 0, lm2 = 0;
4076 register struct sock *sk;
4077 struct hlist_node *node;
4079 if (type != ACL_LINK)
4080 return 0;
4082 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4084 /* Find listening sockets and check their link_mode */
4085 read_lock(&l2cap_sk_list.lock);
4086 sk_for_each(sk, node, &l2cap_sk_list.head) {
4087 if (sk->sk_state != BT_LISTEN)
4088 continue;
4090 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4091 lm1 |= HCI_LM_ACCEPT;
4092 if (l2cap_pi(sk)->role_switch)
4093 lm1 |= HCI_LM_MASTER;
4094 exact++;
4095 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4096 lm2 |= HCI_LM_ACCEPT;
4097 if (l2cap_pi(sk)->role_switch)
4098 lm2 |= HCI_LM_MASTER;
4101 read_unlock(&l2cap_sk_list.lock);
4103 return exact ? lm1 : lm2;
4106 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4108 struct l2cap_conn *conn;
4110 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4112 if (hcon->type != ACL_LINK)
4113 return 0;
4115 if (!status) {
4116 conn = l2cap_conn_add(hcon, status);
4117 if (conn)
4118 l2cap_conn_ready(conn);
4119 } else
4120 l2cap_conn_del(hcon, bt_err(status));
4122 return 0;
/* Report which HCI disconnect reason to use for this link.
 * 0x13 is the HCI "Remote User Terminated Connection" error code, used
 * as the default when there is no L2CAP connection state to consult. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4137 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4139 BT_DBG("hcon %p reason %d", hcon, reason);
4141 if (hcon->type != ACL_LINK)
4142 return 0;
4144 l2cap_conn_del(hcon, bt_err(reason));
4146 return 0;
4149 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4151 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4152 return;
4154 if (encrypt == 0x00) {
4155 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4156 l2cap_sock_clear_timer(sk);
4157 l2cap_sock_set_timer(sk, HZ * 5);
4158 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4159 __l2cap_sock_close(sk, ECONNREFUSED);
4160 } else {
4161 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4162 l2cap_sock_clear_timer(sk);
/* HCI security event callback: an authentication/encryption procedure on
 * this link finished with 'status' and encryption state 'encrypt'. Walk
 * every channel of the connection and advance its state machine.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on a Connect Response are handled
		 * when that response arrives, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only react to encryption changes */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security succeeded: send the deferred
				 * L2CAP Connect Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * channel is torn down shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection was held pending security:
			 * answer it with success or a security block. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
/* HCI ACL data callback: reassemble L2CAP frames fragmented across ACL
 * packets and hand complete frames to l2cap_recv_frame(). The source
 * 'skb' is consumed on every path: complete frames are passed on,
 * fragments are copied into conn->rx_skb and the source freed at 'drop'.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is still in progress
		 * means the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field of the L2CAP header */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		/* bytes still expected before the frame is complete */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Source skb has been copied or rejected; free it either way */
	kfree_skb(skb);
	return 0;
}
4327 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4329 struct sock *sk;
4330 struct hlist_node *node;
4332 read_lock_bh(&l2cap_sk_list.lock);
4334 sk_for_each(sk, node, &l2cap_sk_list.head) {
4335 struct l2cap_pinfo *pi = l2cap_pi(sk);
4337 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4338 batostr(&bt_sk(sk)->src),
4339 batostr(&bt_sk(sk)->dst),
4340 sk->sk_state, __le16_to_cpu(pi->psm),
4341 pi->scid, pi->dcid,
4342 pi->imtu, pi->omtu, pi->sec_level);
4345 read_unlock_bh(&l2cap_sk_list.lock);
4347 return 0;
/* debugfs open: bind the single-shot seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Dentry of the debugfs "l2cap" file; NULL until created in l2cap_init() */
static struct dentry *l2cap_debugfs;
/* Socket operations exposed to userspace through the BSD socket layer
 * for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = l2cap_sock_recvmsg,
	.poll = bt_sock_poll,
	.ioctl = bt_sock_ioctl,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt
};
/* Registered with the Bluetooth core so socket(PF_BLUETOOTH, ...,
 * BTPROTO_L2CAP) is routed to l2cap_sock_create(). */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = l2cap_sock_create,
};
/* Callbacks registered with the HCI core: connection lifecycle,
 * security confirmations and inbound ACL data for L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4401 static int __init l2cap_init(void)
4403 int err;
4405 err = proto_register(&l2cap_proto, 0);
4406 if (err < 0)
4407 return err;
4409 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4410 if (err < 0) {
4411 BT_ERR("L2CAP socket registration failed");
4412 goto error;
4415 err = hci_register_proto(&l2cap_hci_proto);
4416 if (err < 0) {
4417 BT_ERR("L2CAP protocol registration failed");
4418 bt_sock_unregister(BTPROTO_L2CAP);
4419 goto error;
4422 if (bt_debugfs) {
4423 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4424 bt_debugfs, NULL, &l2cap_debugfs_fops);
4425 if (!l2cap_debugfs)
4426 BT_ERR("Failed to create L2CAP debug file");
4429 BT_INFO("L2CAP ver %s", VERSION);
4430 BT_INFO("L2CAP socket layer initialized");
4432 return 0;
4434 error:
4435 proto_unregister(&l2cap_proto);
4436 return err;
/* Module exit: tear everything down in reverse order of registration. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
/* Empty exported hook: modules that only use L2CAP sockets call this to
 * trigger automatic loading of the L2CAP module without pulling in any
 * other symbol. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
/* Module entry and exit points */
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable parameters, world-readable and root-writable (0644) */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

module_param(tx_window, uint, 0644);
MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Allow request_module("bt-proto-0") to load this module by protocol id */
MODULE_ALIAS("bt-proto-0");