[Bluetooth] Reject L2CAP connections on an insecure ACL link
net/bluetooth/hci_conn.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif
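
/* Initiate an outgoing ACL connection.  Parameters for the Create
 * Connection command are seeded from the inquiry cache when a
 * reasonably fresh entry for the peer is available, and a role switch
 * is requested if the controller supports it and we are not required
 * to stay master. */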
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
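
/* Cancel an outgoing ACL connection attempt.  The Create Connection
 * Cancel command only exists from Bluetooth 1.2 (HCI version 2) on,
 * so it is not sent to older controllers. */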
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
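
/* Tear down an established link by sending the Disconnect command
 * with the given reason code. */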
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
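
/* Request a SCO link on top of an existing ACL connection using the
 * legacy Add SCO Connection command. */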
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
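
/* Request a synchronous (eSCO) link on top of an existing ACL
 * connection: 8000 bytes/s in each direction, no latency or
 * retransmission constraints, and the device's current voice
 * setting. */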
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
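
/* Disconnect timer: runs once a connection has been unused long
 * enough and no references remain.  Pending connect attempts are
 * cancelled, established links are disconnected with reason 0x13
 * (remote user terminated connection), anything else is simply
 * marked closed. */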
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK)
			hci_acl_connect_cancel(conn);
		else
			hci_acl_disconn(conn, 0x13);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_acl_disconn(conn, 0x13);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}
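
/* Idle timer: the link has carried no traffic for the device's idle
 * timeout, so try to put it into sniff mode. */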
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}
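
/* Allocate and initialise a new connection object, select the packet
 * types allowed for the link type, arm the disconnect and idle timers
 * and add the connection to the device's connection hash. */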
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = hdev->esco_type & SCO_ESCO_MASK;
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	tasklet_enable(&hdev->tx_task);

	return conn;
}
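
/* Remove a connection: stop its timers, detach any linked SCO/ACL
 * peer, return unacknowledged ACL slots to the device and unlink the
 * connection from the hash. */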
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	return 0;
}
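
/* Pick the local device to use for a connection to dst: with a
 * specific source address the interface must match it exactly,
 * otherwise any interface that is up and whose address differs from
 * dst will do. */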
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement: when both sides support Secure
 * Simple Pairing, the ACL link must be encrypted before higher-layer
 * connections (such as L2CAP) may be accepted over it. */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) {
		if (!(conn->auth_type & 0x01)) {
			conn->auth_type |= 0x01;
			conn->link_mode &= ~HCI_LM_AUTH;
		}
	}

	if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_auth);

/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_conn_del_sysfs(c);

		hci_proto_disconn_ind(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}
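
/* HCIGETCONNLIST ioctl helper: copy a snapshot of the device's
 * connection hash to userspace, bounded by the number of entries the
 * caller asked for. */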
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle    = c->handle;
		(ci + n)->type      = c->type;
		(ci + n)->out       = c->out;
		(ci + n)->state     = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
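
/* HCIGETCONNINFO ioctl helper: report handle, type, direction, state
 * and link mode for the connection matching the requested type and
 * bdaddr. */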
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle    = conn->handle;
		ci.type      = conn->type;
		ci.out       = conn->out;
		ci.state     = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
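
/* HCIGETAUTHINFO ioctl helper: report the authentication requirements
 * recorded for the ACL connection to the given bdaddr. */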
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}