Bluetooth: Enable Streaming Mode for L2CAP
net/bluetooth/l2cap.c
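A minimal userspace sketch of how an application would ask for Streaming Mode once this change is in place and the enable_ertm parameter is set. It assumes the BlueZ userspace <bluetooth/bluetooth.h> and <bluetooth/l2cap.h> headers; the helper name and error handling are illustrative and not part of this file.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* assumed BlueZ userspace headers */
#include <bluetooth/l2cap.h>

/* Hypothetical helper: connect an L2CAP socket with Streaming Mode requested. */
static int l2cap_streaming_connect(bdaddr_t *dst, unsigned short psm)
{
	struct sockaddr_l2 addr;
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	/* Read the current channel options, switch the mode, write them back. */
	if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
		goto fail;
	opts.mode = L2CAP_MODE_STREAMING;	/* honoured only if enable_ertm is set */
	if (setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts)) < 0)
		goto fail;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(psm);
	bacpy(&addr.l2_bdaddr, dst);

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
		goto fail;

	return sk;

fail:
	close(sk);
	return -1;
}

With enable_ertm clear, the connect() above fails with ENOTSUPP (see l2cap_sock_connect() below); even with it set, whether Streaming Mode is actually used on the link still depends on the configuration exchange with the peer.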
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <net/sock.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.14"
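/* Gate for the enhanced modes: with enable_ertm clear, l2cap_sock_connect() and
 * l2cap_sock_listen() reject sockets configured for ERTM or Streaming Mode with
 * ENOTSUPP, and l2cap_mode_supported() reports neither mode as available. */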
55 static int enable_ertm = 0;
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
77 int reason;
79 BT_DBG("sock %p state %d", sk, sk->sk_state);
81 bh_lock_sock(sk);
83 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
84 reason = ECONNREFUSED;
85 else if (sk->sk_state == BT_CONNECT &&
86 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
87 reason = ECONNREFUSED;
88 else
89 reason = ETIMEDOUT;
91 __l2cap_sock_close(sk, reason);
93 bh_unlock_sock(sk);
95 l2cap_sock_kill(sk);
96 sock_put(sk);
99 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
105 static void l2cap_sock_clear_timer(struct sock *sk)
107 BT_DBG("sock %p state %d", sk, sk->sk_state);
108 sk_stop_timer(sk, &sk->sk_timer);
111 /* ---- L2CAP channels ---- */
112 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
114 struct sock *s;
115 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
116 if (l2cap_pi(s)->dcid == cid)
117 break;
119 return s;
122 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
124 struct sock *s;
125 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 if (l2cap_pi(s)->scid == cid)
127 break;
129 return s;
132 /* Find channel with given SCID.
133 * Returns locked socket */
134 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 struct sock *s;
137 read_lock(&l->lock);
138 s = __l2cap_get_chan_by_scid(l, cid);
139 if (s)
140 bh_lock_sock(s);
141 read_unlock(&l->lock);
142 return s;
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 struct sock *s;
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
150 break;
152 return s;
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 struct sock *s;
158 read_lock(&l->lock);
159 s = __l2cap_get_chan_by_ident(l, ident);
160 if (s)
161 bh_lock_sock(s);
162 read_unlock(&l->lock);
163 return s;
166 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(l, cid))
172 return cid;
175 return 0;
178 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
180 sock_hold(sk);
182 if (l->head)
183 l2cap_pi(l->head)->prev_c = sk;
185 l2cap_pi(sk)->next_c = l->head;
186 l2cap_pi(sk)->prev_c = NULL;
187 l->head = sk;
190 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194 write_lock_bh(&l->lock);
195 if (sk == l->head)
196 l->head = next;
198 if (next)
199 l2cap_pi(next)->prev_c = prev;
200 if (prev)
201 l2cap_pi(prev)->next_c = next;
202 write_unlock_bh(&l->lock);
204 __sock_put(sk);
207 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209 struct l2cap_chan_list *l = &conn->chan_list;
211 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
212 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
214 conn->disc_reason = 0x13;
216 l2cap_pi(sk)->conn = conn;
218 if (sk->sk_type == SOCK_SEQPACKET) {
219 /* Alloc CID for connection-oriented socket */
220 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
221 } else if (sk->sk_type == SOCK_DGRAM) {
222 /* Connectionless socket */
223 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
224 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
226 } else {
227 /* Raw socket can send/recv signalling messages only */
228 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
229 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 __l2cap_chan_link(l, sk);
235 if (parent)
236 bt_accept_enqueue(parent, sk);
239 /* Delete channel.
240 * Must be called on the locked socket. */
241 static void l2cap_chan_del(struct sock *sk, int err)
243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
244 struct sock *parent = bt_sk(sk)->parent;
246 l2cap_sock_clear_timer(sk);
248 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
250 if (conn) {
251 /* Unlink from channel list */
252 l2cap_chan_unlink(&conn->chan_list, sk);
253 l2cap_pi(sk)->conn = NULL;
254 hci_conn_put(conn->hcon);
257 sk->sk_state = BT_CLOSED;
258 sock_set_flag(sk, SOCK_ZAPPED);
260 if (err)
261 sk->sk_err = err;
263 if (parent) {
264 bt_accept_unlink(sk);
265 parent->sk_data_ready(parent, 0);
266 } else
267 sk->sk_state_change(sk);
270 /* Service level security */
271 static inline int l2cap_check_security(struct sock *sk)
273 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
274 __u8 auth_type;
276 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
278 auth_type = HCI_AT_NO_BONDING_MITM;
279 else
280 auth_type = HCI_AT_NO_BONDING;
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
284 } else {
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 auth_type = HCI_AT_GENERAL_BONDING_MITM;
288 break;
289 case BT_SECURITY_MEDIUM:
290 auth_type = HCI_AT_GENERAL_BONDING;
291 break;
292 default:
293 auth_type = HCI_AT_NO_BONDING;
294 break;
298 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
299 auth_type);
302 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
304 u8 id;
306 /* Get next available identifier.
307 * 1 - 128 are used by kernel.
308 * 129 - 199 are reserved.
309 * 200 - 254 are used by utilities like l2ping, etc.
312 spin_lock_bh(&conn->lock);
314 if (++conn->tx_ident > 128)
315 conn->tx_ident = 1;
317 id = conn->tx_ident;
319 spin_unlock_bh(&conn->lock);
321 return id;
324 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 BT_DBG("code 0x%2.2x", code);
330 if (!skb)
331 return -ENOMEM;
333 return hci_send_acl(conn->hcon, skb, 0);
336 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
338 struct sk_buff *skb;
339 struct l2cap_hdr *lh;
340 struct l2cap_conn *conn = pi->conn;
341 int count;
343 BT_DBG("pi %p, control 0x%2.2x", pi, control);
345 count = min_t(unsigned int, conn->mtu, L2CAP_HDR_SIZE + 2);
346 control |= L2CAP_CTRL_FRAME_TYPE;
348 skb = bt_skb_alloc(count, GFP_ATOMIC);
349 if (!skb)
350 return -ENOMEM;
352 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
353 lh->len = cpu_to_le16(2);
354 lh->cid = cpu_to_le16(pi->dcid);
355 put_unaligned_le16(control, skb_put(skb, 2));
357 return hci_send_acl(pi->conn->hcon, skb, 0);
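/* An S-frame carries no payload: just the basic L2CAP header plus the 16-bit
 * control field, with L2CAP_CTRL_FRAME_TYPE marking it as supervisory. Callers
 * OR in the supervisory function bits (e.g. L2CAP_SUPER_RCV_READY) and, for
 * poll frames, L2CAP_CTRL_POLL; see the retransmission and monitor timers below. */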
360 static void l2cap_do_start(struct sock *sk)
362 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
364 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
365 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
366 return;
368 if (l2cap_check_security(sk)) {
369 struct l2cap_conn_req req;
370 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
371 req.psm = l2cap_pi(sk)->psm;
373 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
375 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
376 L2CAP_CONN_REQ, sizeof(req), &req);
378 } else {
379 struct l2cap_info_req req;
380 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
382 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
383 conn->info_ident = l2cap_get_ident(conn);
385 mod_timer(&conn->info_timer, jiffies +
386 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
388 l2cap_send_cmd(conn, conn->info_ident,
389 L2CAP_INFO_REQ, sizeof(req), &req);
393 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
395 struct l2cap_disconn_req req;
397 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
398 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
399 l2cap_send_cmd(conn, l2cap_get_ident(conn),
400 L2CAP_DISCONN_REQ, sizeof(req), &req);
403 /* ---- L2CAP connections ---- */
404 static void l2cap_conn_start(struct l2cap_conn *conn)
406 struct l2cap_chan_list *l = &conn->chan_list;
407 struct sock *sk;
409 BT_DBG("conn %p", conn);
411 read_lock(&l->lock);
413 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
414 bh_lock_sock(sk);
416 if (sk->sk_type != SOCK_SEQPACKET) {
417 bh_unlock_sock(sk);
418 continue;
421 if (sk->sk_state == BT_CONNECT) {
422 if (l2cap_check_security(sk)) {
423 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
425 req.psm = l2cap_pi(sk)->psm;
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
429 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
430 L2CAP_CONN_REQ, sizeof(req), &req);
432 } else if (sk->sk_state == BT_CONNECT2) {
433 struct l2cap_conn_rsp rsp;
434 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
435 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
437 if (l2cap_check_security(sk)) {
438 if (bt_sk(sk)->defer_setup) {
439 struct sock *parent = bt_sk(sk)->parent;
440 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
441 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
442 parent->sk_data_ready(parent, 0);
444 } else {
445 sk->sk_state = BT_CONFIG;
446 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
447 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
449 } else {
450 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
451 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
454 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
455 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
458 bh_unlock_sock(sk);
461 read_unlock(&l->lock);
464 static void l2cap_conn_ready(struct l2cap_conn *conn)
466 struct l2cap_chan_list *l = &conn->chan_list;
467 struct sock *sk;
469 BT_DBG("conn %p", conn);
471 read_lock(&l->lock);
473 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
474 bh_lock_sock(sk);
476 if (sk->sk_type != SOCK_SEQPACKET) {
477 l2cap_sock_clear_timer(sk);
478 sk->sk_state = BT_CONNECTED;
479 sk->sk_state_change(sk);
480 } else if (sk->sk_state == BT_CONNECT)
481 l2cap_do_start(sk);
483 bh_unlock_sock(sk);
486 read_unlock(&l->lock);
489 /* Notify sockets that we cannot guarantee reliability anymore */
490 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
492 struct l2cap_chan_list *l = &conn->chan_list;
493 struct sock *sk;
495 BT_DBG("conn %p", conn);
497 read_lock(&l->lock);
499 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
500 if (l2cap_pi(sk)->force_reliable)
501 sk->sk_err = err;
504 read_unlock(&l->lock);
507 static void l2cap_info_timeout(unsigned long arg)
509 struct l2cap_conn *conn = (void *) arg;
511 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
512 conn->info_ident = 0;
514 l2cap_conn_start(conn);
517 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
519 struct l2cap_conn *conn = hcon->l2cap_data;
521 if (conn || status)
522 return conn;
524 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
525 if (!conn)
526 return NULL;
528 hcon->l2cap_data = conn;
529 conn->hcon = hcon;
531 BT_DBG("hcon %p conn %p", hcon, conn);
533 conn->mtu = hcon->hdev->acl_mtu;
534 conn->src = &hcon->hdev->bdaddr;
535 conn->dst = &hcon->dst;
537 conn->feat_mask = 0;
539 setup_timer(&conn->info_timer, l2cap_info_timeout,
540 (unsigned long) conn);
542 spin_lock_init(&conn->lock);
543 rwlock_init(&conn->chan_list.lock);
545 conn->disc_reason = 0x13;
547 return conn;
550 static void l2cap_conn_del(struct hci_conn *hcon, int err)
552 struct l2cap_conn *conn = hcon->l2cap_data;
553 struct sock *sk;
555 if (!conn)
556 return;
558 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
560 kfree_skb(conn->rx_skb);
562 /* Kill channels */
563 while ((sk = conn->chan_list.head)) {
564 bh_lock_sock(sk);
565 l2cap_chan_del(sk, err);
566 bh_unlock_sock(sk);
567 l2cap_sock_kill(sk);
570 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
571 del_timer_sync(&conn->info_timer);
573 hcon->l2cap_data = NULL;
574 kfree(conn);
577 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
579 struct l2cap_chan_list *l = &conn->chan_list;
580 write_lock_bh(&l->lock);
581 __l2cap_chan_add(conn, sk, parent);
582 write_unlock_bh(&l->lock);
585 /* ---- Socket interface ---- */
586 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
588 struct sock *sk;
589 struct hlist_node *node;
590 sk_for_each(sk, node, &l2cap_sk_list.head)
591 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
592 goto found;
593 sk = NULL;
594 found:
595 return sk;
598 /* Find socket with psm and source bdaddr.
599 * Returns closest match.
601 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
603 struct sock *sk = NULL, *sk1 = NULL;
604 struct hlist_node *node;
606 sk_for_each(sk, node, &l2cap_sk_list.head) {
607 if (state && sk->sk_state != state)
608 continue;
610 if (l2cap_pi(sk)->psm == psm) {
611 /* Exact match. */
612 if (!bacmp(&bt_sk(sk)->src, src))
613 break;
615 /* Closest match */
616 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
617 sk1 = sk;
620 return node ? sk : sk1;
623 /* Find socket with given address (psm, src).
624 * Returns locked socket */
625 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *s;
628 read_lock(&l2cap_sk_list.lock);
629 s = __l2cap_get_sock_by_psm(state, psm, src);
630 if (s)
631 bh_lock_sock(s);
632 read_unlock(&l2cap_sk_list.lock);
633 return s;
636 static void l2cap_sock_destruct(struct sock *sk)
638 BT_DBG("sk %p", sk);
640 skb_queue_purge(&sk->sk_receive_queue);
641 skb_queue_purge(&sk->sk_write_queue);
644 static void l2cap_sock_cleanup_listen(struct sock *parent)
646 struct sock *sk;
648 BT_DBG("parent %p", parent);
650 /* Close not yet accepted channels */
651 while ((sk = bt_accept_dequeue(parent, NULL)))
652 l2cap_sock_close(sk);
654 parent->sk_state = BT_CLOSED;
655 sock_set_flag(parent, SOCK_ZAPPED);
658 /* Kill socket (only if zapped and orphan)
659 * Must be called on unlocked socket.
661 static void l2cap_sock_kill(struct sock *sk)
663 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
664 return;
666 BT_DBG("sk %p state %d", sk, sk->sk_state);
668 /* Kill poor orphan */
669 bt_sock_unlink(&l2cap_sk_list, sk);
670 sock_set_flag(sk, SOCK_DEAD);
671 sock_put(sk);
674 static void __l2cap_sock_close(struct sock *sk, int reason)
676 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
678 switch (sk->sk_state) {
679 case BT_LISTEN:
680 l2cap_sock_cleanup_listen(sk);
681 break;
683 case BT_CONNECTED:
684 case BT_CONFIG:
685 if (sk->sk_type == SOCK_SEQPACKET) {
686 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
688 sk->sk_state = BT_DISCONN;
689 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
690 l2cap_send_disconn_req(conn, sk);
691 } else
692 l2cap_chan_del(sk, reason);
693 break;
695 case BT_CONNECT2:
696 if (sk->sk_type == SOCK_SEQPACKET) {
697 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
698 struct l2cap_conn_rsp rsp;
699 __u16 result;
701 if (bt_sk(sk)->defer_setup)
702 result = L2CAP_CR_SEC_BLOCK;
703 else
704 result = L2CAP_CR_BAD_PSM;
706 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
707 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
708 rsp.result = cpu_to_le16(result);
709 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
710 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
711 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
712 } else
713 l2cap_chan_del(sk, reason);
714 break;
716 case BT_CONNECT:
717 case BT_DISCONN:
718 l2cap_chan_del(sk, reason);
719 break;
721 default:
722 sock_set_flag(sk, SOCK_ZAPPED);
723 break;
727 /* Must be called on unlocked socket. */
728 static void l2cap_sock_close(struct sock *sk)
730 l2cap_sock_clear_timer(sk);
731 lock_sock(sk);
732 __l2cap_sock_close(sk, ECONNRESET);
733 release_sock(sk);
734 l2cap_sock_kill(sk);
737 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
739 struct l2cap_pinfo *pi = l2cap_pi(sk);
741 BT_DBG("sk %p", sk);
743 if (parent) {
744 sk->sk_type = parent->sk_type;
745 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
747 pi->imtu = l2cap_pi(parent)->imtu;
748 pi->omtu = l2cap_pi(parent)->omtu;
749 pi->mode = l2cap_pi(parent)->mode;
750 pi->fcs = l2cap_pi(parent)->fcs;
751 pi->sec_level = l2cap_pi(parent)->sec_level;
752 pi->role_switch = l2cap_pi(parent)->role_switch;
753 pi->force_reliable = l2cap_pi(parent)->force_reliable;
754 } else {
755 pi->imtu = L2CAP_DEFAULT_MTU;
756 pi->omtu = 0;
757 pi->mode = L2CAP_MODE_BASIC;
758 pi->fcs = L2CAP_FCS_CRC16;
759 pi->sec_level = BT_SECURITY_LOW;
760 pi->role_switch = 0;
761 pi->force_reliable = 0;
764 /* Default config options */
765 pi->conf_len = 0;
766 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
769 static struct proto l2cap_proto = {
770 .name = "L2CAP",
771 .owner = THIS_MODULE,
772 .obj_size = sizeof(struct l2cap_pinfo)
775 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
777 struct sock *sk;
779 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
780 if (!sk)
781 return NULL;
783 sock_init_data(sock, sk);
784 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
786 sk->sk_destruct = l2cap_sock_destruct;
787 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
789 sock_reset_flag(sk, SOCK_ZAPPED);
791 sk->sk_protocol = proto;
792 sk->sk_state = BT_OPEN;
794 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
796 bt_sock_link(&l2cap_sk_list, sk);
797 return sk;
800 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
802 struct sock *sk;
804 BT_DBG("sock %p", sock);
806 sock->state = SS_UNCONNECTED;
808 if (sock->type != SOCK_SEQPACKET &&
809 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
810 return -ESOCKTNOSUPPORT;
812 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
813 return -EPERM;
815 sock->ops = &l2cap_sock_ops;
817 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
818 if (!sk)
819 return -ENOMEM;
821 l2cap_sock_init(sk, NULL);
822 return 0;
825 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
827 struct sock *sk = sock->sk;
828 struct sockaddr_l2 la;
829 int len, err = 0;
831 BT_DBG("sk %p", sk);
833 if (!addr || addr->sa_family != AF_BLUETOOTH)
834 return -EINVAL;
836 memset(&la, 0, sizeof(la));
837 len = min_t(unsigned int, sizeof(la), alen);
838 memcpy(&la, addr, len);
840 if (la.l2_cid)
841 return -EINVAL;
843 lock_sock(sk);
845 if (sk->sk_state != BT_OPEN) {
846 err = -EBADFD;
847 goto done;
850 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
851 !capable(CAP_NET_BIND_SERVICE)) {
852 err = -EACCES;
853 goto done;
856 write_lock_bh(&l2cap_sk_list.lock);
858 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
859 err = -EADDRINUSE;
860 } else {
861 /* Save source address */
862 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
863 l2cap_pi(sk)->psm = la.l2_psm;
864 l2cap_pi(sk)->sport = la.l2_psm;
865 sk->sk_state = BT_BOUND;
867 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
868 __le16_to_cpu(la.l2_psm) == 0x0003)
869 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
872 write_unlock_bh(&l2cap_sk_list.lock);
874 done:
875 release_sock(sk);
876 return err;
879 static int l2cap_do_connect(struct sock *sk)
881 bdaddr_t *src = &bt_sk(sk)->src;
882 bdaddr_t *dst = &bt_sk(sk)->dst;
883 struct l2cap_conn *conn;
884 struct hci_conn *hcon;
885 struct hci_dev *hdev;
886 __u8 auth_type;
887 int err;
889 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
890 l2cap_pi(sk)->psm);
892 hdev = hci_get_route(dst, src);
893 if (!hdev)
894 return -EHOSTUNREACH;
896 hci_dev_lock_bh(hdev);
898 err = -ENOMEM;
900 if (sk->sk_type == SOCK_RAW) {
901 switch (l2cap_pi(sk)->sec_level) {
902 case BT_SECURITY_HIGH:
903 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
904 break;
905 case BT_SECURITY_MEDIUM:
906 auth_type = HCI_AT_DEDICATED_BONDING;
907 break;
908 default:
909 auth_type = HCI_AT_NO_BONDING;
910 break;
912 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
913 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
914 auth_type = HCI_AT_NO_BONDING_MITM;
915 else
916 auth_type = HCI_AT_NO_BONDING;
918 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
919 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
920 } else {
921 switch (l2cap_pi(sk)->sec_level) {
922 case BT_SECURITY_HIGH:
923 auth_type = HCI_AT_GENERAL_BONDING_MITM;
924 break;
925 case BT_SECURITY_MEDIUM:
926 auth_type = HCI_AT_GENERAL_BONDING;
927 break;
928 default:
929 auth_type = HCI_AT_NO_BONDING;
930 break;
934 hcon = hci_connect(hdev, ACL_LINK, dst,
935 l2cap_pi(sk)->sec_level, auth_type);
936 if (!hcon)
937 goto done;
939 conn = l2cap_conn_add(hcon, 0);
940 if (!conn) {
941 hci_conn_put(hcon);
942 goto done;
945 err = 0;
947 /* Update source addr of the socket */
948 bacpy(src, conn->src);
950 l2cap_chan_add(conn, sk, NULL);
952 sk->sk_state = BT_CONNECT;
953 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
955 if (hcon->state == BT_CONNECTED) {
956 if (sk->sk_type != SOCK_SEQPACKET) {
957 l2cap_sock_clear_timer(sk);
958 sk->sk_state = BT_CONNECTED;
959 } else
960 l2cap_do_start(sk);
963 done:
964 hci_dev_unlock_bh(hdev);
965 hci_dev_put(hdev);
966 return err;
969 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
971 struct sock *sk = sock->sk;
972 struct sockaddr_l2 la;
973 int len, err = 0;
975 BT_DBG("sk %p", sk);
977 if (!addr || addr->sa_family != AF_BLUETOOTH)
978 return -EINVAL;
980 memset(&la, 0, sizeof(la));
981 len = min_t(unsigned int, sizeof(la), alen);
982 memcpy(&la, addr, len);
984 if (la.l2_cid)
985 return -EINVAL;
987 lock_sock(sk);
989 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
990 err = -EINVAL;
991 goto done;
994 switch (l2cap_pi(sk)->mode) {
995 case L2CAP_MODE_BASIC:
996 break;
997 case L2CAP_MODE_ERTM:
998 case L2CAP_MODE_STREAMING:
999 if (enable_ertm)
1000 break;
1001 /* fall through */
1002 default:
1003 err = -ENOTSUPP;
1004 goto done;
1007 switch (sk->sk_state) {
1008 case BT_CONNECT:
1009 case BT_CONNECT2:
1010 case BT_CONFIG:
1011 /* Already connecting */
1012 goto wait;
1014 case BT_CONNECTED:
1015 /* Already connected */
1016 goto done;
1018 case BT_OPEN:
1019 case BT_BOUND:
1020 /* Can connect */
1021 break;
1023 default:
1024 err = -EBADFD;
1025 goto done;
1028 /* Set destination address and psm */
1029 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1030 l2cap_pi(sk)->psm = la.l2_psm;
1032 err = l2cap_do_connect(sk);
1033 if (err)
1034 goto done;
1036 wait:
1037 err = bt_sock_wait_state(sk, BT_CONNECTED,
1038 sock_sndtimeo(sk, flags & O_NONBLOCK));
1039 done:
1040 release_sock(sk);
1041 return err;
1044 static int l2cap_sock_listen(struct socket *sock, int backlog)
1046 struct sock *sk = sock->sk;
1047 int err = 0;
1049 BT_DBG("sk %p backlog %d", sk, backlog);
1051 lock_sock(sk);
1053 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1054 err = -EBADFD;
1055 goto done;
1058 switch (l2cap_pi(sk)->mode) {
1059 case L2CAP_MODE_BASIC:
1060 break;
1061 case L2CAP_MODE_ERTM:
1062 case L2CAP_MODE_STREAMING:
1063 if (enable_ertm)
1064 break;
1065 /* fall through */
1066 default:
1067 err = -ENOTSUPP;
1068 goto done;
1071 if (!l2cap_pi(sk)->psm) {
1072 bdaddr_t *src = &bt_sk(sk)->src;
1073 u16 psm;
1075 err = -EINVAL;
1077 write_lock_bh(&l2cap_sk_list.lock);
1079 for (psm = 0x1001; psm < 0x1100; psm += 2)
1080 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1081 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1082 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1083 err = 0;
1084 break;
1087 write_unlock_bh(&l2cap_sk_list.lock);
1089 if (err < 0)
1090 goto done;
1093 sk->sk_max_ack_backlog = backlog;
1094 sk->sk_ack_backlog = 0;
1095 sk->sk_state = BT_LISTEN;
1097 done:
1098 release_sock(sk);
1099 return err;
1102 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1104 DECLARE_WAITQUEUE(wait, current);
1105 struct sock *sk = sock->sk, *nsk;
1106 long timeo;
1107 int err = 0;
1109 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1111 if (sk->sk_state != BT_LISTEN) {
1112 err = -EBADFD;
1113 goto done;
1116 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1118 BT_DBG("sk %p timeo %ld", sk, timeo);
1120 /* Wait for an incoming connection. (wake-one). */
1121 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1122 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1123 set_current_state(TASK_INTERRUPTIBLE);
1124 if (!timeo) {
1125 err = -EAGAIN;
1126 break;
1129 release_sock(sk);
1130 timeo = schedule_timeout(timeo);
1131 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1133 if (sk->sk_state != BT_LISTEN) {
1134 err = -EBADFD;
1135 break;
1138 if (signal_pending(current)) {
1139 err = sock_intr_errno(timeo);
1140 break;
1143 set_current_state(TASK_RUNNING);
1144 remove_wait_queue(sk->sk_sleep, &wait);
1146 if (err)
1147 goto done;
1149 newsock->state = SS_CONNECTED;
1151 BT_DBG("new socket %p", nsk);
1153 done:
1154 release_sock(sk);
1155 return err;
1158 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1160 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1161 struct sock *sk = sock->sk;
1163 BT_DBG("sock %p, sk %p", sock, sk);
1165 addr->sa_family = AF_BLUETOOTH;
1166 *len = sizeof(struct sockaddr_l2);
1168 if (peer) {
1169 la->l2_psm = l2cap_pi(sk)->psm;
1170 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1171 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1172 } else {
1173 la->l2_psm = l2cap_pi(sk)->sport;
1174 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1175 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1178 return 0;
1181 static void l2cap_monitor_timeout(unsigned long arg)
1183 struct sock *sk = (void *) arg;
1184 u16 control;
1186 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1187 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1188 return;
1191 l2cap_pi(sk)->retry_count++;
1192 __mod_monitor_timer();
1194 control = L2CAP_CTRL_POLL;
1195 control |= L2CAP_SUPER_RCV_READY;
1196 l2cap_send_sframe(l2cap_pi(sk), control);
1199 static void l2cap_retrans_timeout(unsigned long arg)
1201 struct sock *sk = (void *) arg;
1202 u16 control;
1204 l2cap_pi(sk)->retry_count = 1;
1205 __mod_monitor_timer();
1207 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1209 control = L2CAP_CTRL_POLL;
1210 control |= L2CAP_SUPER_RCV_READY;
1211 l2cap_send_sframe(l2cap_pi(sk), control);
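/* Timer interplay: when the retransmission timer fires the channel enters the
 * WAIT_F state and polls the peer with an RR S-frame (P bit set); the monitor
 * timer above then keeps re-polling, and once retry_count reaches remote_max_tx
 * the channel is torn down with a Disconnect request. */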
1214 static void l2cap_drop_acked_frames(struct sock *sk)
1216 struct sk_buff *skb;
1218 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1219 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1220 break;
1222 skb = skb_dequeue(TX_QUEUE(sk));
1223 kfree_skb(skb);
1225 l2cap_pi(sk)->unacked_frames--;
1228 if (!l2cap_pi(sk)->unacked_frames)
1229 del_timer(&l2cap_pi(sk)->retrans_timer);
1231 return;
1234 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1236 struct l2cap_pinfo *pi = l2cap_pi(sk);
1237 int err;
1239 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1241 err = hci_send_acl(pi->conn->hcon, skb, 0);
1242 if (err < 0)
1243 kfree_skb(skb);
1245 return err;
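/* Streaming Mode transmit path: each queued PDU gets the next TxSeq (modulo 64)
 * stamped into its control field and is pushed out immediately; the frame is
 * dropped from the queue right away because Streaming Mode never retransmits.
 * Contrast with l2cap_ertm_send() below, which tracks retries, unacked_frames
 * and the retransmission timer. */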
1248 static int l2cap_streaming_send(struct sock *sk)
1250 struct sk_buff *skb, *tx_skb;
1251 struct l2cap_pinfo *pi = l2cap_pi(sk);
1252 u16 control;
1253 int err;
1255 while ((skb = sk->sk_send_head)) {
1256 tx_skb = skb_clone(skb, GFP_ATOMIC);
1258 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1259 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1260 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1262 err = l2cap_do_send(sk, tx_skb);
1263 if (err < 0) {
1264 l2cap_send_disconn_req(pi->conn, sk);
1265 return err;
1268 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1270 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1271 sk->sk_send_head = NULL;
1272 else
1273 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1275 skb = skb_dequeue(TX_QUEUE(sk));
1276 kfree_skb(skb);
1278 return 0;
1281 static int l2cap_ertm_send(struct sock *sk)
1283 struct sk_buff *skb, *tx_skb;
1284 struct l2cap_pinfo *pi = l2cap_pi(sk);
1285 u16 control;
1286 int err;
1288 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1289 return 0;
1291 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1292 tx_skb = skb_clone(skb, GFP_ATOMIC);
1294 if (pi->remote_max_tx &&
1295 bt_cb(skb)->retries == pi->remote_max_tx) {
1296 l2cap_send_disconn_req(pi->conn, sk);
1297 break;
1300 bt_cb(skb)->retries++;
1302 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1303 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1304 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1305 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1308 err = l2cap_do_send(sk, tx_skb);
1309 if (err < 0) {
1310 l2cap_send_disconn_req(pi->conn, sk);
1311 return err;
1313 __mod_retrans_timer();
1315 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1316 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1318 pi->unacked_frames++;
1320 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1321 sk->sk_send_head = NULL;
1322 else
1323 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1326 return 0;
1329 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1331 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1332 struct sk_buff **frag;
1333 int err, sent = 0;
1335 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1336 return -EFAULT;
1339 sent += count;
1340 len -= count;
1342 /* Continuation fragments (no L2CAP header) */
1343 frag = &skb_shinfo(skb)->frag_list;
1344 while (len) {
1345 count = min_t(unsigned int, conn->mtu, len);
1347 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1348 if (!*frag)
1349 return -EFAULT;
1350 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1351 return -EFAULT;
1353 sent += count;
1354 len -= count;
1356 frag = &(*frag)->next;
1359 return sent;
1362 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1364 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1365 struct sk_buff *skb;
1366 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1367 struct l2cap_hdr *lh;
1369 BT_DBG("sk %p len %d", sk, (int)len);
1371 count = min_t(unsigned int, (conn->mtu - hlen), len);
1372 skb = bt_skb_send_alloc(sk, count + hlen,
1373 msg->msg_flags & MSG_DONTWAIT, &err);
1374 if (!skb)
1375 return ERR_PTR(-ENOMEM);
1377 /* Create L2CAP header */
1378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1379 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1380 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1381 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1383 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1384 if (unlikely(err < 0)) {
1385 kfree_skb(skb);
1386 return ERR_PTR(err);
1388 return skb;
1391 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1393 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1394 struct sk_buff *skb;
1395 int err, count, hlen = L2CAP_HDR_SIZE;
1396 struct l2cap_hdr *lh;
1398 BT_DBG("sk %p len %d", sk, (int)len);
1400 count = min_t(unsigned int, (conn->mtu - hlen), len);
1401 skb = bt_skb_send_alloc(sk, count + hlen,
1402 msg->msg_flags & MSG_DONTWAIT, &err);
1403 if (!skb)
1404 return ERR_PTR(-ENOMEM);
1406 /* Create L2CAP header */
1407 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1408 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1409 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1411 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1412 if (unlikely(err < 0)) {
1413 kfree_skb(skb);
1414 return ERR_PTR(err);
1416 return skb;
1419 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1421 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1422 struct sk_buff *skb;
1423 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1424 struct l2cap_hdr *lh;
1426 BT_DBG("sk %p len %d", sk, (int)len);
1428 if (sdulen)
1429 hlen += 2;
1431 count = min_t(unsigned int, (conn->mtu - hlen), len);
1432 skb = bt_skb_send_alloc(sk, count + hlen,
1433 msg->msg_flags & MSG_DONTWAIT, &err);
1434 if (!skb)
1435 return ERR_PTR(-ENOMEM);
1437 /* Create L2CAP header */
1438 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1439 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1440 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1441 put_unaligned_le16(control, skb_put(skb, 2));
1442 if (sdulen)
1443 put_unaligned_le16(sdulen, skb_put(skb, 2));
1445 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1446 if (unlikely(err < 0)) {
1447 kfree_skb(skb);
1448 return ERR_PTR(err);
1451 bt_cb(skb)->retries = 0;
1452 return skb;
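/* Segmentation and Reassembly: an SDU that does not fit in one PDU is split
 * into a START I-frame (which also carries the total SDU length), zero or more
 * CONTINUE frames and a final END frame, all built with
 * l2cap_create_iframe_pdu() above and spliced onto the socket's TX queue. */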
1455 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1457 struct l2cap_pinfo *pi = l2cap_pi(sk);
1458 struct sk_buff *skb;
1459 struct sk_buff_head sar_queue;
1460 u16 control;
1461 size_t size = 0;
1463 __skb_queue_head_init(&sar_queue);
1464 control = L2CAP_SDU_START;
1465 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1466 if (IS_ERR(skb))
1467 return PTR_ERR(skb);
1469 __skb_queue_tail(&sar_queue, skb);
1470 len -= pi->max_pdu_size;
1471 size += pi->max_pdu_size;
1472 control = 0;
1474 while (len > 0) {
1475 size_t buflen;
1477 if (len > pi->max_pdu_size) {
1478 control |= L2CAP_SDU_CONTINUE;
1479 buflen = pi->max_pdu_size;
1480 } else {
1481 control |= L2CAP_SDU_END;
1482 buflen = len;
1485 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1486 if (IS_ERR(skb)) {
1487 skb_queue_purge(&sar_queue);
1488 return PTR_ERR(skb);
1491 __skb_queue_tail(&sar_queue, skb);
1492 len -= buflen;
1493 size += buflen;
1494 control = 0;
1496 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1497 if (sk->sk_send_head == NULL)
1498 sk->sk_send_head = sar_queue.next;
1500 return size;
1503 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1505 struct sock *sk = sock->sk;
1506 struct l2cap_pinfo *pi = l2cap_pi(sk);
1507 struct sk_buff *skb;
1508 u16 control;
1509 int err;
1511 BT_DBG("sock %p, sk %p", sock, sk);
1513 err = sock_error(sk);
1514 if (err)
1515 return err;
1517 if (msg->msg_flags & MSG_OOB)
1518 return -EOPNOTSUPP;
1520 /* Check outgoing MTU */
1521 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1522 && len > pi->omtu)
1523 return -EINVAL;
1525 lock_sock(sk);
1527 if (sk->sk_state != BT_CONNECTED) {
1528 err = -ENOTCONN;
1529 goto done;
1532 /* Connectionless channel */
1533 if (sk->sk_type == SOCK_DGRAM) {
1534 skb = l2cap_create_connless_pdu(sk, msg, len);
1535 err = l2cap_do_send(sk, skb);
1536 goto done;
1539 switch (pi->mode) {
1540 case L2CAP_MODE_BASIC:
1541 /* Create a basic PDU */
1542 skb = l2cap_create_basic_pdu(sk, msg, len);
1543 if (IS_ERR(skb)) {
1544 err = PTR_ERR(skb);
1545 goto done;
1548 err = l2cap_do_send(sk, skb);
1549 if (!err)
1550 err = len;
1551 break;
1553 case L2CAP_MODE_ERTM:
1554 case L2CAP_MODE_STREAMING:
1555 /* Entire SDU fits into one PDU */
1556 if (len <= pi->max_pdu_size) {
1557 control = L2CAP_SDU_UNSEGMENTED;
1558 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1559 if (IS_ERR(skb)) {
1560 err = PTR_ERR(skb);
1561 goto done;
1563 __skb_queue_tail(TX_QUEUE(sk), skb);
1564 if (sk->sk_send_head == NULL)
1565 sk->sk_send_head = skb;
1566 } else {
1567 /* Segment SDU into multiple PDUs */
1568 err = l2cap_sar_segment_sdu(sk, msg, len);
1569 if (err < 0)
1570 goto done;
1573 if (pi->mode == L2CAP_MODE_STREAMING)
1574 err = l2cap_streaming_send(sk);
1575 else
1576 err = l2cap_ertm_send(sk);
1578 if (!err)
1579 err = len;
1580 break;
1582 default:
1583 BT_DBG("bad state %1.1x", pi->mode);
1584 err = -EINVAL;
1587 done:
1588 release_sock(sk);
1589 return err;
1592 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1594 struct sock *sk = sock->sk;
1596 lock_sock(sk);
1598 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1599 struct l2cap_conn_rsp rsp;
1601 sk->sk_state = BT_CONFIG;
1603 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1604 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1605 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1606 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1607 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1608 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1610 release_sock(sk);
1611 return 0;
1614 release_sock(sk);
1616 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1619 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1621 struct sock *sk = sock->sk;
1622 struct l2cap_options opts;
1623 int len, err = 0;
1624 u32 opt;
1626 BT_DBG("sk %p", sk);
1628 lock_sock(sk);
1630 switch (optname) {
1631 case L2CAP_OPTIONS:
1632 opts.imtu = l2cap_pi(sk)->imtu;
1633 opts.omtu = l2cap_pi(sk)->omtu;
1634 opts.flush_to = l2cap_pi(sk)->flush_to;
1635 opts.mode = l2cap_pi(sk)->mode;
1637 len = min_t(unsigned int, sizeof(opts), optlen);
1638 if (copy_from_user((char *) &opts, optval, len)) {
1639 err = -EFAULT;
1640 break;
1643 l2cap_pi(sk)->imtu = opts.imtu;
1644 l2cap_pi(sk)->omtu = opts.omtu;
1645 l2cap_pi(sk)->mode = opts.mode;
1646 break;
1648 case L2CAP_LM:
1649 if (get_user(opt, (u32 __user *) optval)) {
1650 err = -EFAULT;
1651 break;
1654 if (opt & L2CAP_LM_AUTH)
1655 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1656 if (opt & L2CAP_LM_ENCRYPT)
1657 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1658 if (opt & L2CAP_LM_SECURE)
1659 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1661 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1662 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1663 break;
1665 default:
1666 err = -ENOPROTOOPT;
1667 break;
1670 release_sock(sk);
1671 return err;
1674 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1676 struct sock *sk = sock->sk;
1677 struct bt_security sec;
1678 int len, err = 0;
1679 u32 opt;
1681 BT_DBG("sk %p", sk);
1683 if (level == SOL_L2CAP)
1684 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1686 if (level != SOL_BLUETOOTH)
1687 return -ENOPROTOOPT;
1689 lock_sock(sk);
1691 switch (optname) {
1692 case BT_SECURITY:
1693 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1694 err = -EINVAL;
1695 break;
1698 sec.level = BT_SECURITY_LOW;
1700 len = min_t(unsigned int, sizeof(sec), optlen);
1701 if (copy_from_user((char *) &sec, optval, len)) {
1702 err = -EFAULT;
1703 break;
1706 if (sec.level < BT_SECURITY_LOW ||
1707 sec.level > BT_SECURITY_HIGH) {
1708 err = -EINVAL;
1709 break;
1712 l2cap_pi(sk)->sec_level = sec.level;
1713 break;
1715 case BT_DEFER_SETUP:
1716 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1717 err = -EINVAL;
1718 break;
1721 if (get_user(opt, (u32 __user *) optval)) {
1722 err = -EFAULT;
1723 break;
1726 bt_sk(sk)->defer_setup = opt;
1727 break;
1729 default:
1730 err = -ENOPROTOOPT;
1731 break;
1734 release_sock(sk);
1735 return err;
1738 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1740 struct sock *sk = sock->sk;
1741 struct l2cap_options opts;
1742 struct l2cap_conninfo cinfo;
1743 int len, err = 0;
1744 u32 opt;
1746 BT_DBG("sk %p", sk);
1748 if (get_user(len, optlen))
1749 return -EFAULT;
1751 lock_sock(sk);
1753 switch (optname) {
1754 case L2CAP_OPTIONS:
1755 opts.imtu = l2cap_pi(sk)->imtu;
1756 opts.omtu = l2cap_pi(sk)->omtu;
1757 opts.flush_to = l2cap_pi(sk)->flush_to;
1758 opts.mode = l2cap_pi(sk)->mode;
1760 len = min_t(unsigned int, len, sizeof(opts));
1761 if (copy_to_user(optval, (char *) &opts, len))
1762 err = -EFAULT;
1764 break;
1766 case L2CAP_LM:
1767 switch (l2cap_pi(sk)->sec_level) {
1768 case BT_SECURITY_LOW:
1769 opt = L2CAP_LM_AUTH;
1770 break;
1771 case BT_SECURITY_MEDIUM:
1772 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1773 break;
1774 case BT_SECURITY_HIGH:
1775 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1776 L2CAP_LM_SECURE;
1777 break;
1778 default:
1779 opt = 0;
1780 break;
1783 if (l2cap_pi(sk)->role_switch)
1784 opt |= L2CAP_LM_MASTER;
1786 if (l2cap_pi(sk)->force_reliable)
1787 opt |= L2CAP_LM_RELIABLE;
1789 if (put_user(opt, (u32 __user *) optval))
1790 err = -EFAULT;
1791 break;
1793 case L2CAP_CONNINFO:
1794 if (sk->sk_state != BT_CONNECTED &&
1795 !(sk->sk_state == BT_CONNECT2 &&
1796 bt_sk(sk)->defer_setup)) {
1797 err = -ENOTCONN;
1798 break;
1801 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1802 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1804 len = min_t(unsigned int, len, sizeof(cinfo));
1805 if (copy_to_user(optval, (char *) &cinfo, len))
1806 err = -EFAULT;
1808 break;
1810 default:
1811 err = -ENOPROTOOPT;
1812 break;
1815 release_sock(sk);
1816 return err;
1819 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1821 struct sock *sk = sock->sk;
1822 struct bt_security sec;
1823 int len, err = 0;
1825 BT_DBG("sk %p", sk);
1827 if (level == SOL_L2CAP)
1828 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1830 if (level != SOL_BLUETOOTH)
1831 return -ENOPROTOOPT;
1833 if (get_user(len, optlen))
1834 return -EFAULT;
1836 lock_sock(sk);
1838 switch (optname) {
1839 case BT_SECURITY:
1840 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1841 err = -EINVAL;
1842 break;
1845 sec.level = l2cap_pi(sk)->sec_level;
1847 len = min_t(unsigned int, len, sizeof(sec));
1848 if (copy_to_user(optval, (char *) &sec, len))
1849 err = -EFAULT;
1851 break;
1853 case BT_DEFER_SETUP:
1854 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1855 err = -EINVAL;
1856 break;
1859 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1860 err = -EFAULT;
1862 break;
1864 default:
1865 err = -ENOPROTOOPT;
1866 break;
1869 release_sock(sk);
1870 return err;
1873 static int l2cap_sock_shutdown(struct socket *sock, int how)
1875 struct sock *sk = sock->sk;
1876 int err = 0;
1878 BT_DBG("sock %p, sk %p", sock, sk);
1880 if (!sk)
1881 return 0;
1883 lock_sock(sk);
1884 if (!sk->sk_shutdown) {
1885 sk->sk_shutdown = SHUTDOWN_MASK;
1886 l2cap_sock_clear_timer(sk);
1887 __l2cap_sock_close(sk, 0);
1889 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1890 err = bt_sock_wait_state(sk, BT_CLOSED,
1891 sk->sk_lingertime);
1893 release_sock(sk);
1894 return err;
1897 static int l2cap_sock_release(struct socket *sock)
1899 struct sock *sk = sock->sk;
1900 int err;
1902 BT_DBG("sock %p, sk %p", sock, sk);
1904 if (!sk)
1905 return 0;
1907 err = l2cap_sock_shutdown(sock, 2);
1909 sock_orphan(sk);
1910 l2cap_sock_kill(sk);
1911 return err;
1914 static void l2cap_chan_ready(struct sock *sk)
1916 struct sock *parent = bt_sk(sk)->parent;
1918 BT_DBG("sk %p, parent %p", sk, parent);
1920 l2cap_pi(sk)->conf_state = 0;
1921 l2cap_sock_clear_timer(sk);
1923 if (!parent) {
1924 /* Outgoing channel.
1925 * Wake up socket sleeping on connect.
1927 sk->sk_state = BT_CONNECTED;
1928 sk->sk_state_change(sk);
1929 } else {
1930 /* Incoming channel.
1931 * Wake up socket sleeping on accept.
1933 parent->sk_data_ready(parent, 0);
1937 /* Copy frame to all raw sockets on that connection */
1938 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1940 struct l2cap_chan_list *l = &conn->chan_list;
1941 struct sk_buff *nskb;
1942 struct sock *sk;
1944 BT_DBG("conn %p", conn);
1946 read_lock(&l->lock);
1947 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1948 if (sk->sk_type != SOCK_RAW)
1949 continue;
1951 /* Don't send frame to the socket it came from */
1952 if (skb->sk == sk)
1953 continue;
1954 nskb = skb_clone(skb, GFP_ATOMIC);
1955 if (!nskb)
1956 continue;
1958 if (sock_queue_rcv_skb(sk, nskb))
1959 kfree_skb(nskb);
1961 read_unlock(&l->lock);
1964 /* ---- L2CAP signalling commands ---- */
1965 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1966 u8 code, u8 ident, u16 dlen, void *data)
1968 struct sk_buff *skb, **frag;
1969 struct l2cap_cmd_hdr *cmd;
1970 struct l2cap_hdr *lh;
1971 int len, count;
1973 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1974 conn, code, ident, dlen);
1976 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1977 count = min_t(unsigned int, conn->mtu, len);
1979 skb = bt_skb_alloc(count, GFP_ATOMIC);
1980 if (!skb)
1981 return NULL;
1983 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1984 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1985 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1987 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1988 cmd->code = code;
1989 cmd->ident = ident;
1990 cmd->len = cpu_to_le16(dlen);
1992 if (dlen) {
1993 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1994 memcpy(skb_put(skb, count), data, count);
1995 data += count;
1998 len -= skb->len;
2000 /* Continuation fragments (no L2CAP header) */
2001 frag = &skb_shinfo(skb)->frag_list;
2002 while (len) {
2003 count = min_t(unsigned int, conn->mtu, len);
2005 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2006 if (!*frag)
2007 goto fail;
2009 memcpy(skb_put(*frag, count), data, count);
2011 len -= count;
2012 data += count;
2014 frag = &(*frag)->next;
2017 return skb;
2019 fail:
2020 kfree_skb(skb);
2021 return NULL;
2024 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2026 struct l2cap_conf_opt *opt = *ptr;
2027 int len;
2029 len = L2CAP_CONF_OPT_SIZE + opt->len;
2030 *ptr += len;
2032 *type = opt->type;
2033 *olen = opt->len;
2035 switch (opt->len) {
2036 case 1:
2037 *val = *((u8 *) opt->val);
2038 break;
2040 case 2:
2041 *val = __le16_to_cpu(*((__le16 *) opt->val));
2042 break;
2044 case 4:
2045 *val = __le32_to_cpu(*((__le32 *) opt->val));
2046 break;
2048 default:
2049 *val = (unsigned long) opt->val;
2050 break;
2053 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2054 return len;
2057 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2059 struct l2cap_conf_opt *opt = *ptr;
2061 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2063 opt->type = type;
2064 opt->len = len;
2066 switch (len) {
2067 case 1:
2068 *((u8 *) opt->val) = val;
2069 break;
2071 case 2:
2072 *((__le16 *) opt->val) = cpu_to_le16(val);
2073 break;
2075 case 4:
2076 *((__le32 *) opt->val) = cpu_to_le32(val);
2077 break;
2079 default:
2080 memcpy(opt->val, (void *) val, len);
2081 break;
2084 *ptr += L2CAP_CONF_OPT_SIZE + len;
2087 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2089 u32 local_feat_mask = l2cap_feat_mask;
2090 if (enable_ertm)
2091 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2093 switch (mode) {
2094 case L2CAP_MODE_ERTM:
2095 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2096 case L2CAP_MODE_STREAMING:
2097 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2098 default:
2099 return 0x00;
2103 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2105 switch (mode) {
2106 case L2CAP_MODE_STREAMING:
2107 case L2CAP_MODE_ERTM:
2108 if (l2cap_mode_supported(mode, remote_feat_mask))
2109 return mode;
2110 /* fall through */
2111 default:
2112 return L2CAP_MODE_BASIC;
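/* Mode selection: ERTM or Streaming Mode is chosen only when both the local
 * feature mask (which includes these bits only if enable_ertm is set) and the
 * peer's feature mask advertise it; anything else falls back to Basic Mode. */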
2116 static int l2cap_build_conf_req(struct sock *sk, void *data)
2118 struct l2cap_pinfo *pi = l2cap_pi(sk);
2119 struct l2cap_conf_req *req = data;
2120 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2121 void *ptr = req->data;
2123 BT_DBG("sk %p", sk);
2125 if (pi->num_conf_req || pi->num_conf_rsp)
2126 goto done;
2128 switch (pi->mode) {
2129 case L2CAP_MODE_STREAMING:
2130 case L2CAP_MODE_ERTM:
2131 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2132 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2133 l2cap_send_disconn_req(pi->conn, sk);
2134 break;
2135 default:
2136 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2137 break;
2140 done:
2141 switch (pi->mode) {
2142 case L2CAP_MODE_BASIC:
2143 if (pi->imtu != L2CAP_DEFAULT_MTU)
2144 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2145 break;
2147 case L2CAP_MODE_ERTM:
2148 rfc.mode = L2CAP_MODE_ERTM;
2149 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2150 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
2151 rfc.retrans_timeout = 0;
2152 rfc.monitor_timeout = 0;
2153 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2155 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2156 sizeof(rfc), (unsigned long) &rfc);
2157 break;
2159 case L2CAP_MODE_STREAMING:
2160 rfc.mode = L2CAP_MODE_STREAMING;
2161 rfc.txwin_size = 0;
2162 rfc.max_transmit = 0;
2163 rfc.retrans_timeout = 0;
2164 rfc.monitor_timeout = 0;
2165 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2168 sizeof(rfc), (unsigned long) &rfc);
2169 break;
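/* For Streaming Mode the window size, max transmit and timeout fields of the
 * RFC option are unused and are sent as zero; only max_pdu_size is meaningful. */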
2172 /* FIXME: Need actual value of the flush timeout */
2173 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2174 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2176 req->dcid = cpu_to_le16(pi->dcid);
2177 req->flags = cpu_to_le16(0);
2179 return ptr - data;
2182 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2184 struct l2cap_pinfo *pi = l2cap_pi(sk);
2185 struct l2cap_conf_rsp *rsp = data;
2186 void *ptr = rsp->data;
2187 void *req = pi->conf_req;
2188 int len = pi->conf_len;
2189 int type, hint, olen;
2190 unsigned long val;
2191 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2192 u16 mtu = L2CAP_DEFAULT_MTU;
2193 u16 result = L2CAP_CONF_SUCCESS;
2195 BT_DBG("sk %p", sk);
2197 while (len >= L2CAP_CONF_OPT_SIZE) {
2198 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2200 hint = type & L2CAP_CONF_HINT;
2201 type &= L2CAP_CONF_MASK;
2203 switch (type) {
2204 case L2CAP_CONF_MTU:
2205 mtu = val;
2206 break;
2208 case L2CAP_CONF_FLUSH_TO:
2209 pi->flush_to = val;
2210 break;
2212 case L2CAP_CONF_QOS:
2213 break;
2215 case L2CAP_CONF_RFC:
2216 if (olen == sizeof(rfc))
2217 memcpy(&rfc, (void *) val, olen);
2218 break;
2220 default:
2221 if (hint)
2222 break;
2224 result = L2CAP_CONF_UNKNOWN;
2225 *((u8 *) ptr++) = type;
2226 break;
2230 if (pi->num_conf_rsp || pi->num_conf_req)
2231 goto done;
2233 switch (pi->mode) {
2234 case L2CAP_MODE_STREAMING:
2235 case L2CAP_MODE_ERTM:
2236 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2237 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2238 return -ECONNREFUSED;
2239 break;
2240 default:
2241 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2242 break;
2245 done:
2246 if (pi->mode != rfc.mode) {
2247 result = L2CAP_CONF_UNACCEPT;
2248 rfc.mode = pi->mode;
2250 if (pi->num_conf_rsp == 1)
2251 return -ECONNREFUSED;
2253 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2254 sizeof(rfc), (unsigned long) &rfc);
2258 if (result == L2CAP_CONF_SUCCESS) {
2259 /* Configure output options and let the other side know
2260 * which ones we don't like. */
2262 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2263 result = L2CAP_CONF_UNACCEPT;
2264 else {
2265 pi->omtu = mtu;
2266 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2268 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2270 switch (rfc.mode) {
2271 case L2CAP_MODE_BASIC:
2272 pi->fcs = L2CAP_FCS_NONE;
2273 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2274 break;
2276 case L2CAP_MODE_ERTM:
2277 pi->remote_tx_win = rfc.txwin_size;
2278 pi->remote_max_tx = rfc.max_transmit;
2279 pi->max_pdu_size = rfc.max_pdu_size;
2281 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2282 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2284 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2285 break;
2287 case L2CAP_MODE_STREAMING:
2288 pi->remote_tx_win = rfc.txwin_size;
2289 pi->max_pdu_size = rfc.max_pdu_size;
2291 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2292 break;
2294 default:
2295 result = L2CAP_CONF_UNACCEPT;
2297 memset(&rfc, 0, sizeof(rfc));
2298 rfc.mode = pi->mode;
2301 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2302 sizeof(rfc), (unsigned long) &rfc);
2304 if (result == L2CAP_CONF_SUCCESS)
2305 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2307 rsp->scid = cpu_to_le16(pi->dcid);
2308 rsp->result = cpu_to_le16(result);
2309 rsp->flags = cpu_to_le16(0x0000);
2311 return ptr - data;
2314 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2316 struct l2cap_pinfo *pi = l2cap_pi(sk);
2317 struct l2cap_conf_req *req = data;
2318 void *ptr = req->data;
2319 int type, olen;
2320 unsigned long val;
2321 struct l2cap_conf_rfc rfc;
2323 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2325 while (len >= L2CAP_CONF_OPT_SIZE) {
2326 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2328 switch (type) {
2329 case L2CAP_CONF_MTU:
2330 if (val < L2CAP_DEFAULT_MIN_MTU) {
2331 *result = L2CAP_CONF_UNACCEPT;
2332 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2333 } else
2334 pi->omtu = val;
2335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2336 break;
2338 case L2CAP_CONF_FLUSH_TO:
2339 pi->flush_to = val;
2340 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2341 2, pi->flush_to);
2342 break;
2344 case L2CAP_CONF_RFC:
2345 if (olen == sizeof(rfc))
2346 memcpy(&rfc, (void *)val, olen);
2348 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2349 rfc.mode != pi->mode)
2350 return -ECONNREFUSED;
2352 pi->mode = rfc.mode;
2353 pi->fcs = 0;
2355 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2356 sizeof(rfc), (unsigned long) &rfc);
2357 break;
2361 if (*result == L2CAP_CONF_SUCCESS) {
2362 switch (rfc.mode) {
2363 case L2CAP_MODE_ERTM:
2364 pi->remote_tx_win = rfc.txwin_size;
2365 pi->retrans_timeout = rfc.retrans_timeout;
2366 pi->monitor_timeout = rfc.monitor_timeout;
2367 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2368 break;
2369 case L2CAP_MODE_STREAMING:
2370 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2371 break;
2375 req->dcid = cpu_to_le16(pi->dcid);
2376 req->flags = cpu_to_le16(0x0000);
2378 return ptr - data;
2381 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2383 struct l2cap_conf_rsp *rsp = data;
2384 void *ptr = rsp->data;
2386 BT_DBG("sk %p", sk);
2388 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2389 rsp->result = cpu_to_le16(result);
2390 rsp->flags = cpu_to_le16(flags);
2392 return ptr - data;
2395 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2397 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2399 if (rej->reason != 0x0000)
2400 return 0;
2402 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2403 cmd->ident == conn->info_ident) {
2404 del_timer(&conn->info_timer);
2406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2407 conn->info_ident = 0;
2409 l2cap_conn_start(conn);
2412 return 0;
2415 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2417 struct l2cap_chan_list *list = &conn->chan_list;
2418 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2419 struct l2cap_conn_rsp rsp;
2420 struct sock *sk, *parent;
2421 int result, status = L2CAP_CS_NO_INFO;
2423 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2424 __le16 psm = req->psm;
2426 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2428 /* Check if we have a socket listening on this PSM */
2429 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2430 if (!parent) {
2431 result = L2CAP_CR_BAD_PSM;
2432 goto sendresp;
2435 /* Check if the ACL is secure enough (if not SDP) */
2436 if (psm != cpu_to_le16(0x0001) &&
2437 !hci_conn_check_link_mode(conn->hcon)) {
2438 conn->disc_reason = 0x05;
2439 result = L2CAP_CR_SEC_BLOCK;
2440 goto response;
2443 result = L2CAP_CR_NO_MEM;
2445 /* Check for backlog size */
2446 if (sk_acceptq_is_full(parent)) {
2447 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2448 goto response;
2451 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2452 if (!sk)
2453 goto response;
2455 write_lock_bh(&list->lock);
2457 /* Check if we already have a channel with that dcid */
2458 if (__l2cap_get_chan_by_dcid(list, scid)) {
2459 write_unlock_bh(&list->lock);
2460 sock_set_flag(sk, SOCK_ZAPPED);
2461 l2cap_sock_kill(sk);
2462 goto response;
2465 hci_conn_hold(conn->hcon);
2467 l2cap_sock_init(sk, parent);
2468 bacpy(&bt_sk(sk)->src, conn->src);
2469 bacpy(&bt_sk(sk)->dst, conn->dst);
2470 l2cap_pi(sk)->psm = psm;
2471 l2cap_pi(sk)->dcid = scid;
2473 __l2cap_chan_add(conn, sk, parent);
2474 dcid = l2cap_pi(sk)->scid;
2476 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2478 l2cap_pi(sk)->ident = cmd->ident;
2480 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2481 if (l2cap_check_security(sk)) {
2482 if (bt_sk(sk)->defer_setup) {
2483 sk->sk_state = BT_CONNECT2;
2484 result = L2CAP_CR_PEND;
2485 status = L2CAP_CS_AUTHOR_PEND;
2486 parent->sk_data_ready(parent, 0);
2487 } else {
2488 sk->sk_state = BT_CONFIG;
2489 result = L2CAP_CR_SUCCESS;
2490 status = L2CAP_CS_NO_INFO;
2492 } else {
2493 sk->sk_state = BT_CONNECT2;
2494 result = L2CAP_CR_PEND;
2495 status = L2CAP_CS_AUTHEN_PEND;
2497 } else {
2498 sk->sk_state = BT_CONNECT2;
2499 result = L2CAP_CR_PEND;
2500 status = L2CAP_CS_NO_INFO;
2503 write_unlock_bh(&list->lock);
2505 response:
2506 bh_unlock_sock(parent);
2508 sendresp:
2509 rsp.scid = cpu_to_le16(scid);
2510 rsp.dcid = cpu_to_le16(dcid);
2511 rsp.result = cpu_to_le16(result);
2512 rsp.status = cpu_to_le16(status);
2513 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2515 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2516 struct l2cap_info_req info;
2517 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2519 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2520 conn->info_ident = l2cap_get_ident(conn);
2522 mod_timer(&conn->info_timer, jiffies +
2523 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2525 l2cap_send_cmd(conn, conn->info_ident,
2526 L2CAP_INFO_REQ, sizeof(info), &info);
2529 return 0;
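/*
 * Summary of the handler above: a Connection Request is answered right away
 * with success or failure when possible, or with a "pending" result while
 * security (authentication/authorisation) completes, or while the peer's
 * feature mask is still being queried via an Information Request so that the
 * channel mode can be chosen during the configuration phase that follows.
 */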
2532 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2534 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2535 u16 scid, dcid, result, status;
2536 struct sock *sk;
2537 u8 req[128];
2539 scid = __le16_to_cpu(rsp->scid);
2540 dcid = __le16_to_cpu(rsp->dcid);
2541 result = __le16_to_cpu(rsp->result);
2542 status = __le16_to_cpu(rsp->status);
2544 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2546 if (scid) {
2547 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2548 if (!sk)
2549 return 0;
2550 } else {
2551 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2552 if (!sk)
2553 return 0;
2556 switch (result) {
2557 case L2CAP_CR_SUCCESS:
2558 sk->sk_state = BT_CONFIG;
2559 l2cap_pi(sk)->ident = 0;
2560 l2cap_pi(sk)->dcid = dcid;
2561 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2563 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2565 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2566 l2cap_build_conf_req(sk, req), req);
2567 l2cap_pi(sk)->num_conf_req++;
2568 break;
2570 case L2CAP_CR_PEND:
2571 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2572 break;
2574 default:
2575 l2cap_chan_del(sk, ECONNREFUSED);
2576 break;
2579 bh_unlock_sock(sk);
2580 return 0;
2583 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2585 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2586 u16 dcid, flags;
2587 u8 rsp[64];
2588 struct sock *sk;
2589 int len;
2591 dcid = __le16_to_cpu(req->dcid);
2592 flags = __le16_to_cpu(req->flags);
2594 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2596 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2597 if (!sk)
2598 return -ENOENT;
2600 if (sk->sk_state == BT_DISCONN)
2601 goto unlock;
2603 /* Reject if config buffer is too small. */
2604 len = cmd_len - sizeof(*req);
2605 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2606 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2607 l2cap_build_conf_rsp(sk, rsp,
2608 L2CAP_CONF_REJECT, flags), rsp);
2609 goto unlock;
2612 /* Store config. */
2613 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2614 l2cap_pi(sk)->conf_len += len;
2616 if (flags & 0x0001) {
2617 /* Incomplete config. Send empty response. */
2618 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2619 l2cap_build_conf_rsp(sk, rsp,
2620 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2621 goto unlock;
2624 /* Complete config. */
2625 len = l2cap_parse_conf_req(sk, rsp);
2626 if (len < 0) {
2627 l2cap_send_disconn_req(conn, sk);
2628 goto unlock;
2631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2632 l2cap_pi(sk)->num_conf_rsp++;
2634 /* Reset config buffer. */
2635 l2cap_pi(sk)->conf_len = 0;
2637 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2638 goto unlock;
2640 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2641 sk->sk_state = BT_CONNECTED;
2642 l2cap_pi(sk)->next_tx_seq = 0;
2643 l2cap_pi(sk)->expected_ack_seq = 0;
2644 l2cap_pi(sk)->unacked_frames = 0;
2646 setup_timer(&l2cap_pi(sk)->retrans_timer,
2647 l2cap_retrans_timeout, (unsigned long) sk);
2648 setup_timer(&l2cap_pi(sk)->monitor_timer,
2649 l2cap_monitor_timeout, (unsigned long) sk);
2651 __skb_queue_head_init(TX_QUEUE(sk));
2652 l2cap_chan_ready(sk);
2653 goto unlock;
2656 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2657 u8 buf[64];
2658 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2659 l2cap_build_conf_req(sk, buf), buf);
2660 l2cap_pi(sk)->num_conf_req++;
2663 unlock:
2664 bh_unlock_sock(sk);
2665 return 0;
2668 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2670 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2671 u16 scid, flags, result;
2672 struct sock *sk;
2674 scid = __le16_to_cpu(rsp->scid);
2675 flags = __le16_to_cpu(rsp->flags);
2676 result = __le16_to_cpu(rsp->result);
2678 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2679 scid, flags, result);
2681 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2682 if (!sk)
2683 return 0;
2685 switch (result) {
2686 case L2CAP_CONF_SUCCESS:
2687 break;
2689 case L2CAP_CONF_UNACCEPT:
2690 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2691 int len = cmd->len - sizeof(*rsp);
2692 char req[64];
2694 /* throw out any old stored conf requests */
2695 result = L2CAP_CONF_SUCCESS;
2696 len = l2cap_parse_conf_rsp(sk, rsp->data,
2697 len, req, &result);
2698 if (len < 0) {
2699 l2cap_send_disconn_req(conn, sk);
2700 goto done;
2703 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2704 L2CAP_CONF_REQ, len, req);
2705 l2cap_pi(sk)->num_conf_req++;
2706 if (result != L2CAP_CONF_SUCCESS)
2707 goto done;
2708 break;
2711 default:
2712 sk->sk_state = BT_DISCONN;
2713 sk->sk_err = ECONNRESET;
2714 l2cap_sock_set_timer(sk, HZ * 5);
2715 l2cap_send_disconn_req(conn, sk);
2716 goto done;
2719 if (flags & 0x01)
2720 goto done;
2722 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2724 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2725 sk->sk_state = BT_CONNECTED;
2726 l2cap_pi(sk)->expected_tx_seq = 0;
2727 l2cap_pi(sk)->num_to_ack = 0;
2728 __skb_queue_head_init(TX_QUEUE(sk));
2729 l2cap_chan_ready(sk);
2732 done:
2733 bh_unlock_sock(sk);
2734 return 0;
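/*
 * Note on the two flags checked above: configuration runs independently in
 * each direction.  L2CAP_CONF_OUTPUT_DONE is set once our Configure Response
 * to the peer has gone out (l2cap_config_req), L2CAP_CONF_INPUT_DONE once the
 * peer has accepted our own request (here).  Only when both are set does the
 * channel enter BT_CONNECTED and the ERTM/streaming counters get reset.
 */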
2737 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2739 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2740 struct l2cap_disconn_rsp rsp;
2741 u16 dcid, scid;
2742 struct sock *sk;
2744 scid = __le16_to_cpu(req->scid);
2745 dcid = __le16_to_cpu(req->dcid);
2747 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2749 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2750 if (!sk)
2751 return 0;
2753 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2755 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2757 sk->sk_shutdown = SHUTDOWN_MASK;
2759 skb_queue_purge(TX_QUEUE(sk));
2760 del_timer(&l2cap_pi(sk)->retrans_timer);
2761 del_timer(&l2cap_pi(sk)->monitor_timer);
2763 l2cap_chan_del(sk, ECONNRESET);
2764 bh_unlock_sock(sk);
2766 l2cap_sock_kill(sk);
2767 return 0;
2770 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2772 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2773 u16 dcid, scid;
2774 struct sock *sk;
2776 scid = __le16_to_cpu(rsp->scid);
2777 dcid = __le16_to_cpu(rsp->dcid);
2779 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2781 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2782 if (!sk)
2783 return 0;
2785 skb_queue_purge(TX_QUEUE(sk));
2786 del_timer(&l2cap_pi(sk)->retrans_timer);
2787 del_timer(&l2cap_pi(sk)->monitor_timer);
2789 l2cap_chan_del(sk, 0);
2790 bh_unlock_sock(sk);
2792 l2cap_sock_kill(sk);
2793 return 0;
2796 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2798 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2799 u16 type;
2801 type = __le16_to_cpu(req->type);
2803 BT_DBG("type 0x%4.4x", type);
2805 if (type == L2CAP_IT_FEAT_MASK) {
2806 u8 buf[8];
2807 u32 feat_mask = l2cap_feat_mask;
2808 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2809 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2810 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2811 if (enable_ertm)
2812 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2813 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2814 l2cap_send_cmd(conn, cmd->ident,
2815 L2CAP_INFO_RSP, sizeof(buf), buf);
2816 } else if (type == L2CAP_IT_FIXED_CHAN) {
2817 u8 buf[12];
2818 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2819 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2820 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2821 memcpy(buf + 4, l2cap_fixed_chan, 8);
2822 l2cap_send_cmd(conn, cmd->ident,
2823 L2CAP_INFO_RSP, sizeof(buf), buf);
2824 } else {
2825 struct l2cap_info_rsp rsp;
2826 rsp.type = cpu_to_le16(type);
2827 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2828 l2cap_send_cmd(conn, cmd->ident,
2829 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2832 return 0;
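/*
 * A minimal sketch of the 8-byte feature-mask Information Response built
 * above, assuming the spec values: info type 0x0002 (feature mask), result
 * 0x0000 (success), and feature bits 0x08 for ERTM, 0x10 for streaming mode
 * and 0x80 for fixed channels.  Illustrative only, not part of this file.
 */
#include <stdint.h>

struct info_rsp_feat_mask {
	uint16_t type;      /* 0x0002, little endian */
	uint16_t result;    /* 0x0000 on success */
	uint32_t feat_mask; /* bit mask of supported features */
};

/* With enable_ertm set, both modes are advertised on top of the
 * fixed-channel bit already carried in l2cap_feat_mask. */
static uint32_t example_feat_mask(int enable_ertm)
{
	uint32_t mask = 0x80;                 /* fixed channels */
	if (enable_ertm)
		mask |= 0x08 | 0x10;          /* ERTM + streaming */
	return mask;
}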
2835 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2837 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2838 u16 type, result;
2840 type = __le16_to_cpu(rsp->type);
2841 result = __le16_to_cpu(rsp->result);
2843 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2845 del_timer(&conn->info_timer);
2847 if (type == L2CAP_IT_FEAT_MASK) {
2848 conn->feat_mask = get_unaligned_le32(rsp->data);
2850 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2851 struct l2cap_info_req req;
2852 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2854 conn->info_ident = l2cap_get_ident(conn);
2856 l2cap_send_cmd(conn, conn->info_ident,
2857 L2CAP_INFO_REQ, sizeof(req), &req);
2858 } else {
2859 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2860 conn->info_ident = 0;
2862 l2cap_conn_start(conn);
2864 } else if (type == L2CAP_IT_FIXED_CHAN) {
2865 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2866 conn->info_ident = 0;
2868 l2cap_conn_start(conn);
2871 return 0;
2874 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2876 u8 *data = skb->data;
2877 int len = skb->len;
2878 struct l2cap_cmd_hdr cmd;
2879 int err = 0;
2881 l2cap_raw_recv(conn, skb);
2883 while (len >= L2CAP_CMD_HDR_SIZE) {
2884 u16 cmd_len;
2885 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2886 data += L2CAP_CMD_HDR_SIZE;
2887 len -= L2CAP_CMD_HDR_SIZE;
2889 cmd_len = le16_to_cpu(cmd.len);
2891 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2893 if (cmd_len > len || !cmd.ident) {
2894 BT_DBG("corrupted command");
2895 break;
2898 switch (cmd.code) {
2899 case L2CAP_COMMAND_REJ:
2900 l2cap_command_rej(conn, &cmd, data);
2901 break;
2903 case L2CAP_CONN_REQ:
2904 err = l2cap_connect_req(conn, &cmd, data);
2905 break;
2907 case L2CAP_CONN_RSP:
2908 err = l2cap_connect_rsp(conn, &cmd, data);
2909 break;
2911 case L2CAP_CONF_REQ:
2912 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2913 break;
2915 case L2CAP_CONF_RSP:
2916 err = l2cap_config_rsp(conn, &cmd, data);
2917 break;
2919 case L2CAP_DISCONN_REQ:
2920 err = l2cap_disconnect_req(conn, &cmd, data);
2921 break;
2923 case L2CAP_DISCONN_RSP:
2924 err = l2cap_disconnect_rsp(conn, &cmd, data);
2925 break;
2927 case L2CAP_ECHO_REQ:
2928 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2929 break;
2931 case L2CAP_ECHO_RSP:
2932 break;
2934 case L2CAP_INFO_REQ:
2935 err = l2cap_information_req(conn, &cmd, data);
2936 break;
2938 case L2CAP_INFO_RSP:
2939 err = l2cap_information_rsp(conn, &cmd, data);
2940 break;
2942 default:
2943 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2944 err = -EINVAL;
2945 break;
2948 if (err) {
2949 struct l2cap_cmd_rej rej;
2950 BT_DBG("error %d", err);
2952 /* FIXME: Map err to a valid reason */
2953 rej.reason = cpu_to_le16(0);
2954 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2957 data += cmd_len;
2958 len -= cmd_len;
2961 kfree_skb(skb);
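/*
 * The loop above walks one or more commands packed into a single signalling
 * frame.  A standalone sketch of the same walk, assuming the 4-octet command
 * header (code, ident, 16-bit little-endian length) from the specification;
 * illustrative only, not part of this file.
 */
#include <stdint.h>
#include <stddef.h>

static void walk_sig_commands(const uint8_t *data, size_t len,
			void (*cb)(uint8_t code, uint8_t ident,
					const uint8_t *payload, uint16_t plen))
{
	while (len >= 4) {
		uint8_t code = data[0];
		uint8_t ident = data[1];
		uint16_t plen = data[2] | (data[3] << 8);

		data += 4;
		len -= 4;

		if (plen > len || !ident)	/* corrupted command */
			break;

		cb(code, ident, data, plen);
		data += plen;
		len -= plen;
	}
}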
2964 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2966 struct l2cap_pinfo *pi = l2cap_pi(sk);
2967 struct sk_buff *_skb;
2968 int err = -EINVAL;
2970 switch (control & L2CAP_CTRL_SAR) {
2971 case L2CAP_SDU_UNSEGMENTED:
2972 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2973 kfree_skb(pi->sdu);
2974 break;
2977 err = sock_queue_rcv_skb(sk, skb);
2978 if (!err)
2979 return 0;
2981 break;
2983 case L2CAP_SDU_START:
2984 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2985 kfree_skb(pi->sdu);
2986 break;
2989 pi->sdu_len = get_unaligned_le16(skb->data);
2990 skb_pull(skb, 2);
2992 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2993 if (!pi->sdu) {
2994 err = -ENOMEM;
2995 break;
2998 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3000 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3001 pi->partial_sdu_len = skb->len;
3002 err = 0;
3003 break;
3005 case L2CAP_SDU_CONTINUE:
3006 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3007 break;
3009 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3011 pi->partial_sdu_len += skb->len;
3012 if (pi->partial_sdu_len > pi->sdu_len)
3013 kfree_skb(pi->sdu);
3014 else
3015 err = 0;
3017 break;
3019 case L2CAP_SDU_END:
3020 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3021 break;
3023 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3025 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3026 pi->partial_sdu_len += skb->len;
3028 if (pi->partial_sdu_len == pi->sdu_len) {
3029 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3030 err = sock_queue_rcv_skb(sk, _skb);
3031 if (err < 0)
3032 kfree_skb(_skb);
3034 kfree_skb(pi->sdu);
3035 err = 0;
3037 break;
3040 kfree_skb(skb);
3041 return err;
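/*
 * A minimal sketch of the SAR encoding consumed above, assuming the
 * specification's use of bits 15-14 of the I-frame control field:
 * 00 unsegmented SDU, 01 start, 10 end, 11 continuation.  Only the start
 * segment carries the extra 2-byte total SDU length that is pulled off the
 * skb above.  Illustrative only, not part of this file.
 */
#include <stdint.h>

#define SAR_MASK        0xC000
#define SAR_UNSEGMENTED 0x0000
#define SAR_START       0x4000
#define SAR_END         0x8000
#define SAR_CONTINUE    0xC000

/* True if this segment is prefixed by the 2-byte total-SDU-length field. */
static int sar_has_sdu_len(uint16_t control)
{
	return (control & SAR_MASK) == SAR_START;
}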
3044 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3046 struct l2cap_pinfo *pi = l2cap_pi(sk);
3047 u8 tx_seq = __get_txseq(rx_control);
3048 u16 tx_control = 0;
3049 int err = 0;
3051 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3053 if (tx_seq == pi->expected_tx_seq) {
3054 if (pi->conn_state & L2CAP_CONN_UNDER_REJ)
3055 pi->conn_state &= ~L2CAP_CONN_UNDER_REJ;
3057 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3058 if (err < 0)
3059 return err;
3061 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3062 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3063 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3064 tx_control |= L2CAP_SUPER_RCV_READY;
3065 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3066 goto send;
3068 } else {
3069 /* Unexpected txSeq. Send a REJ S-frame */
3070 kfree_skb(skb);
3071 if (!(pi->conn_state & L2CAP_CONN_UNDER_REJ)) {
3072 tx_control |= L2CAP_SUPER_REJECT;
3073 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3074 pi->conn_state |= L2CAP_CONN_UNDER_REJ;
3076 goto send;
3079 return 0;
3081 send:
3082 return l2cap_send_sframe(pi, tx_control);
3085 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3087 struct l2cap_pinfo *pi = l2cap_pi(sk);
3089 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3091 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3092 case L2CAP_SUPER_RCV_READY:
3093 if (rx_control & L2CAP_CTRL_POLL) {
3094 u16 control = L2CAP_CTRL_FINAL;
3095 control |= L2CAP_SUPER_RCV_READY;
3096 l2cap_send_sframe(l2cap_pi(sk), control);
3097 } else if (rx_control & L2CAP_CTRL_FINAL) {
3098 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3099 break;
3101 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3102 del_timer(&pi->monitor_timer);
3104 if (pi->unacked_frames > 0)
3105 __mod_retrans_timer();
3106 } else {
3107 pi->expected_ack_seq = __get_reqseq(rx_control);
3108 l2cap_drop_acked_frames(sk);
3109 if (pi->unacked_frames > 0)
3110 __mod_retrans_timer();
3111 l2cap_ertm_send(sk);
3113 break;
3115 case L2CAP_SUPER_REJECT:
3116 pi->expected_ack_seq = __get_reqseq(rx_control);
3117 l2cap_drop_acked_frames(sk);
3119 sk->sk_send_head = TX_QUEUE(sk)->next;
3120 pi->next_tx_seq = pi->expected_ack_seq;
3122 l2cap_ertm_send(sk);
3124 break;
3126 case L2CAP_SUPER_RCV_NOT_READY:
3127 case L2CAP_SUPER_SELECT_REJECT:
3128 break;
3131 return 0;
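/*
 * A minimal sketch of the standard ERTM control field decoded by the two
 * handlers above, assuming the specification's bit layout (illustrative
 * only, not part of this file):
 *
 *   I-frame: [SAR 15-14][ReqSeq 13-8][F 7][TxSeq 6-1][0]
 *   S-frame: [0 0 15-14][ReqSeq 13-8][F 7][0 0][P 4][S 3-2][0][1]
 *
 * S values: 0 RR (receiver ready), 1 REJ, 2 RNR, 3 SREJ.
 */
#include <stdint.h>

static int     is_sframe(uint16_t control)  { return control & 0x0001; }
static uint8_t get_txseq(uint16_t control)  { return (control >> 1) & 0x3f; }
static uint8_t get_reqseq(uint16_t control) { return (control >> 8) & 0x3f; }
static uint8_t get_super(uint16_t control)  { return (control >> 2) & 0x03; }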
3134 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3136 struct sock *sk;
3137 struct l2cap_pinfo *pi;
3138 u16 control, len;
3139 u8 tx_seq;
3140 int err;
3142 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3143 if (!sk) {
3144 BT_DBG("unknown cid 0x%4.4x", cid);
3145 goto drop;
3148 pi = l2cap_pi(sk);
3150 BT_DBG("sk %p, len %d", sk, skb->len);
3152 if (sk->sk_state != BT_CONNECTED)
3153 goto drop;
3155 switch (pi->mode) {
3156 case L2CAP_MODE_BASIC:
3157 /* If the socket receive buffer overflows we drop data here,
3158 * which is *bad* because L2CAP has to be reliable.
3159 * But we have no other choice: basic mode L2CAP does not
3160 * provide a flow control mechanism. */
3162 if (pi->imtu < skb->len)
3163 goto drop;
3165 if (!sock_queue_rcv_skb(sk, skb))
3166 goto done;
3167 break;
3169 case L2CAP_MODE_ERTM:
3170 control = get_unaligned_le16(skb->data);
3171 skb_pull(skb, 2);
3172 len = skb->len;
3174 if (__is_sar_start(control))
3175 len -= 2;
3178 /* We can just drop the corrupted I-frame here.
3179 * The receiver will miss it and start the proper recovery
3180 * procedure, asking for retransmission. */
3182 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3183 goto drop;
3185 if (__is_iframe(control))
3186 err = l2cap_data_channel_iframe(sk, control, skb);
3187 else
3188 err = l2cap_data_channel_sframe(sk, control, skb);
3190 if (!err)
3191 goto done;
3192 break;
3194 case L2CAP_MODE_STREAMING:
3195 control = get_unaligned_le16(skb->data);
3196 skb_pull(skb, 2);
3197 len = skb->len;
3199 if (__is_sar_start(control))
3200 len -= 2;
3202 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3203 goto drop;
3205 tx_seq = __get_txseq(control);
3207 if (pi->expected_tx_seq == tx_seq)
3208 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3209 else
3210 pi->expected_tx_seq = (tx_seq + 1) % 64; /* keep the 6-bit counter in range */
3212 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3214 goto done;
3216 default:
3217 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3218 break;
3221 drop:
3222 kfree_skb(skb);
3224 done:
3225 if (sk)
3226 bh_unlock_sock(sk);
3228 return 0;
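/*
 * What the mode switch above means for userspace: once enable_ertm is set,
 * a socket can ask for ERTM or streaming mode before connecting.  A minimal
 * client sketch, assuming a libbluetooth whose <bluetooth/l2cap.h> exposes
 * sockaddr_l2, a struct l2cap_options with a mode member and the
 * L2CAP_MODE_STREAMING constant (names and layout may differ between BlueZ
 * versions); illustrative only, not part of this file.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int connect_streaming(const bdaddr_t *dst, uint16_t psm)
{
	struct sockaddr_l2 addr;
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0)
		return -1;

	/* Read the current options, change only the mode, write them back. */
	if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
		goto fail;
	opts.mode = L2CAP_MODE_STREAMING;	/* or L2CAP_MODE_ERTM */
	if (setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts)) < 0)
		goto fail;

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(psm);
	bacpy(&addr.l2_bdaddr, dst);

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
		goto fail;
	return sk;

fail:
	close(sk);
	return -1;
}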
3231 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3233 struct sock *sk;
3235 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3236 if (!sk)
3237 goto drop;
3239 BT_DBG("sk %p, len %d", sk, skb->len);
3241 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3242 goto drop;
3244 if (l2cap_pi(sk)->imtu < skb->len)
3245 goto drop;
3247 if (!sock_queue_rcv_skb(sk, skb))
3248 goto done;
3250 drop:
3251 kfree_skb(skb);
3253 done:
3254 if (sk)
3255 bh_unlock_sock(sk);
3256 return 0;
3259 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3261 struct l2cap_hdr *lh = (void *) skb->data;
3262 u16 cid, len;
3263 __le16 psm;
3265 skb_pull(skb, L2CAP_HDR_SIZE);
3266 cid = __le16_to_cpu(lh->cid);
3267 len = __le16_to_cpu(lh->len);
3269 if (len != skb->len) {
3270 kfree_skb(skb);
3271 return;
3274 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3276 switch (cid) {
3277 case L2CAP_CID_SIGNALING:
3278 l2cap_sig_channel(conn, skb);
3279 break;
3281 case L2CAP_CID_CONN_LESS:
3282 psm = get_unaligned((__le16 *) skb->data);
3283 skb_pull(skb, 2);
3284 l2cap_conless_channel(conn, psm, skb);
3285 break;
3287 default:
3288 l2cap_data_channel(conn, cid, skb);
3289 break;
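/*
 * A minimal sketch of the 4-octet basic L2CAP header split off above
 * (16-bit payload length and 16-bit channel id, both little endian);
 * connectionless payloads on CID 0x0002 then start with an extra 2-byte PSM.
 * Illustrative only, not part of this file.
 */
#include <stdint.h>
#include <stddef.h>

static int parse_l2cap_hdr(const uint8_t *pkt, size_t pktlen,
					uint16_t *len, uint16_t *cid)
{
	if (pktlen < 4)
		return -1;
	*len = pkt[0] | (pkt[1] << 8);
	*cid = pkt[2] | (pkt[3] << 8);
	return (*len == pktlen - 4) ? 0 : -1;	/* length must match payload */
}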
3293 /* ---- L2CAP interface with lower layer (HCI) ---- */
3295 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3297 int exact = 0, lm1 = 0, lm2 = 0;
3298 register struct sock *sk;
3299 struct hlist_node *node;
3301 if (type != ACL_LINK)
3302 return 0;
3304 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3306 /* Find listening sockets and check their link_mode */
3307 read_lock(&l2cap_sk_list.lock);
3308 sk_for_each(sk, node, &l2cap_sk_list.head) {
3309 if (sk->sk_state != BT_LISTEN)
3310 continue;
3312 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3313 lm1 |= HCI_LM_ACCEPT;
3314 if (l2cap_pi(sk)->role_switch)
3315 lm1 |= HCI_LM_MASTER;
3316 exact++;
3317 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3318 lm2 |= HCI_LM_ACCEPT;
3319 if (l2cap_pi(sk)->role_switch)
3320 lm2 |= HCI_LM_MASTER;
3323 read_unlock(&l2cap_sk_list.lock);
3325 return exact ? lm1 : lm2;
3328 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3330 struct l2cap_conn *conn;
3332 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3334 if (hcon->type != ACL_LINK)
3335 return 0;
3337 if (!status) {
3338 conn = l2cap_conn_add(hcon, status);
3339 if (conn)
3340 l2cap_conn_ready(conn);
3341 } else
3342 l2cap_conn_del(hcon, bt_err(status));
3344 return 0;
3347 static int l2cap_disconn_ind(struct hci_conn *hcon)
3349 struct l2cap_conn *conn = hcon->l2cap_data;
3351 BT_DBG("hcon %p", hcon);
3353 if (hcon->type != ACL_LINK || !conn)
3354 return 0x13; /* HCI: Remote User Terminated Connection */
3356 return conn->disc_reason;
3359 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3361 BT_DBG("hcon %p reason %d", hcon, reason);
3363 if (hcon->type != ACL_LINK)
3364 return 0;
3366 l2cap_conn_del(hcon, bt_err(reason));
3368 return 0;
3371 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3373 if (sk->sk_type != SOCK_SEQPACKET)
3374 return;
3376 if (encrypt == 0x00) {
3377 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3378 l2cap_sock_clear_timer(sk);
3379 l2cap_sock_set_timer(sk, HZ * 5);
3380 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3381 __l2cap_sock_close(sk, ECONNREFUSED);
3382 } else {
3383 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3384 l2cap_sock_clear_timer(sk);
3388 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3390 struct l2cap_chan_list *l;
3391 struct l2cap_conn *conn = hcon->l2cap_data;
3392 struct sock *sk;
3394 if (!conn)
3395 return 0;
3397 l = &conn->chan_list;
3399 BT_DBG("conn %p", conn);
3401 read_lock(&l->lock);
3403 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3404 bh_lock_sock(sk);
3406 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3407 bh_unlock_sock(sk);
3408 continue;
3411 if (!status && (sk->sk_state == BT_CONNECTED ||
3412 sk->sk_state == BT_CONFIG)) {
3413 l2cap_check_encryption(sk, encrypt);
3414 bh_unlock_sock(sk);
3415 continue;
3418 if (sk->sk_state == BT_CONNECT) {
3419 if (!status) {
3420 struct l2cap_conn_req req;
3421 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3422 req.psm = l2cap_pi(sk)->psm;
3424 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3426 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3427 L2CAP_CONN_REQ, sizeof(req), &req);
3428 } else {
3429 l2cap_sock_clear_timer(sk);
3430 l2cap_sock_set_timer(sk, HZ / 10);
3432 } else if (sk->sk_state == BT_CONNECT2) {
3433 struct l2cap_conn_rsp rsp;
3434 __u16 result;
3436 if (!status) {
3437 sk->sk_state = BT_CONFIG;
3438 result = L2CAP_CR_SUCCESS;
3439 } else {
3440 sk->sk_state = BT_DISCONN;
3441 l2cap_sock_set_timer(sk, HZ / 10);
3442 result = L2CAP_CR_SEC_BLOCK;
3445 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3446 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3447 rsp.result = cpu_to_le16(result);
3448 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3449 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3450 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3453 bh_unlock_sock(sk);
3456 read_unlock(&l->lock);
3458 return 0;
3461 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3463 struct l2cap_conn *conn = hcon->l2cap_data;
3465 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3466 goto drop;
3468 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3470 if (flags & ACL_START) {
3471 struct l2cap_hdr *hdr;
3472 int len;
3474 if (conn->rx_len) {
3475 BT_ERR("Unexpected start frame (len %d)", skb->len);
3476 kfree_skb(conn->rx_skb);
3477 conn->rx_skb = NULL;
3478 conn->rx_len = 0;
3479 l2cap_conn_unreliable(conn, ECOMM);
3482 if (skb->len < 2) {
3483 BT_ERR("Frame is too short (len %d)", skb->len);
3484 l2cap_conn_unreliable(conn, ECOMM);
3485 goto drop;
3488 hdr = (struct l2cap_hdr *) skb->data;
3489 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3491 if (len == skb->len) {
3492 /* Complete frame received */
3493 l2cap_recv_frame(conn, skb);
3494 return 0;
3497 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3499 if (skb->len > len) {
3500 BT_ERR("Frame is too long (len %d, expected len %d)",
3501 skb->len, len);
3502 l2cap_conn_unreliable(conn, ECOMM);
3503 goto drop;
3506 /* Allocate skb for the complete frame (with header) */
3507 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3508 if (!conn->rx_skb)
3509 goto drop;
3511 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3512 skb->len);
3513 conn->rx_len = len - skb->len;
3514 } else {
3515 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3517 if (!conn->rx_len) {
3518 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3519 l2cap_conn_unreliable(conn, ECOMM);
3520 goto drop;
3523 if (skb->len > conn->rx_len) {
3524 BT_ERR("Fragment is too long (len %d, expected %d)",
3525 skb->len, conn->rx_len);
3526 kfree_skb(conn->rx_skb);
3527 conn->rx_skb = NULL;
3528 conn->rx_len = 0;
3529 l2cap_conn_unreliable(conn, ECOMM);
3530 goto drop;
3533 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3534 skb->len);
3535 conn->rx_len -= skb->len;
3537 if (!conn->rx_len) {
3538 /* Complete frame received */
3539 l2cap_recv_frame(conn, conn->rx_skb);
3540 conn->rx_skb = NULL;
3544 drop:
3545 kfree_skb(skb);
3546 return 0;
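/*
 * Reassembly summary for the handler above: an ACL start fragment carries the
 * L2CAP header, so its length field gives the size of the complete frame;
 * rx_skb is allocated for that size and rx_len tracks the bytes still
 * missing.  Continuation fragments are appended until rx_len reaches zero,
 * at which point the rebuilt frame goes to l2cap_recv_frame().  Over-long or
 * unexpected fragments mark the connection unreliable (ECOMM) and are dropped.
 */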
3549 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3551 struct sock *sk;
3552 struct hlist_node *node;
3553 char *str = buf;
3555 read_lock_bh(&l2cap_sk_list.lock);
3557 sk_for_each(sk, node, &l2cap_sk_list.head) {
3558 struct l2cap_pinfo *pi = l2cap_pi(sk);
3560 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3561 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3562 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3563 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3566 read_unlock_bh(&l2cap_sk_list.lock);
3568 return str - buf;
3571 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
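/*
 * The read-only class attribute registered above dumps one line per L2CAP
 * socket in the form: src dst state psm scid dcid imtu omtu sec_level.
 * Assuming the Bluetooth class shows up as "bluetooth" in sysfs, it can be
 * read with e.g. `cat /sys/class/bluetooth/l2cap`.
 */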
3573 static const struct proto_ops l2cap_sock_ops = {
3574 .family = PF_BLUETOOTH,
3575 .owner = THIS_MODULE,
3576 .release = l2cap_sock_release,
3577 .bind = l2cap_sock_bind,
3578 .connect = l2cap_sock_connect,
3579 .listen = l2cap_sock_listen,
3580 .accept = l2cap_sock_accept,
3581 .getname = l2cap_sock_getname,
3582 .sendmsg = l2cap_sock_sendmsg,
3583 .recvmsg = l2cap_sock_recvmsg,
3584 .poll = bt_sock_poll,
3585 .ioctl = bt_sock_ioctl,
3586 .mmap = sock_no_mmap,
3587 .socketpair = sock_no_socketpair,
3588 .shutdown = l2cap_sock_shutdown,
3589 .setsockopt = l2cap_sock_setsockopt,
3590 .getsockopt = l2cap_sock_getsockopt
3593 static struct net_proto_family l2cap_sock_family_ops = {
3594 .family = PF_BLUETOOTH,
3595 .owner = THIS_MODULE,
3596 .create = l2cap_sock_create,
3599 static struct hci_proto l2cap_hci_proto = {
3600 .name = "L2CAP",
3601 .id = HCI_PROTO_L2CAP,
3602 .connect_ind = l2cap_connect_ind,
3603 .connect_cfm = l2cap_connect_cfm,
3604 .disconn_ind = l2cap_disconn_ind,
3605 .disconn_cfm = l2cap_disconn_cfm,
3606 .security_cfm = l2cap_security_cfm,
3607 .recv_acldata = l2cap_recv_acldata
3610 static int __init l2cap_init(void)
3612 int err;
3614 err = proto_register(&l2cap_proto, 0);
3615 if (err < 0)
3616 return err;
3618 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3619 if (err < 0) {
3620 BT_ERR("L2CAP socket registration failed");
3621 goto error;
3624 err = hci_register_proto(&l2cap_hci_proto);
3625 if (err < 0) {
3626 BT_ERR("L2CAP protocol registration failed");
3627 bt_sock_unregister(BTPROTO_L2CAP);
3628 goto error;
3631 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3632 BT_ERR("Failed to create L2CAP info file");
3634 BT_INFO("L2CAP ver %s", VERSION);
3635 BT_INFO("L2CAP socket layer initialized");
3637 return 0;
3639 error:
3640 proto_unregister(&l2cap_proto);
3641 return err;
3644 static void __exit l2cap_exit(void)
3646 class_remove_file(bt_class, &class_attr_l2cap);
3648 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3649 BT_ERR("L2CAP socket unregistration failed");
3651 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3652 BT_ERR("L2CAP protocol unregistration failed");
3654 proto_unregister(&l2cap_proto);
3657 void l2cap_load(void)
3659 /* Dummy function to trigger automatic L2CAP module loading by
3660 * other modules that use L2CAP sockets but don't use any other
3661 * symbols from it. */
3662 return;
3664 EXPORT_SYMBOL(l2cap_load);
3666 module_init(l2cap_init);
3667 module_exit(l2cap_exit);
3669 module_param(enable_ertm, bool, 0644);
3670 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
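/*
 * ERTM and streaming mode stay disabled unless this parameter is turned on,
 * e.g. at load time with `modprobe l2cap enable_ertm=1`.  With the 0644
 * permissions above it is also writable by root at runtime through
 * /sys/module/l2cap/parameters/enable_ertm.
 */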
3672 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3673 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3674 MODULE_VERSION(VERSION);
3675 MODULE_LICENSE("GPL");
3676 MODULE_ALIAS("bt-proto-0");