/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

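/* Initiate an outgoing ACL connection: build a Create Connection command,
 * filling page scan parameters and clock offset from the inquiry cache when
 * a fresh entry for the destination exists, and send it to the controller. */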
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

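/* Cancel an outgoing ACL connection attempt. Create Connection Cancel is
 * only issued when the controller reports HCI version 2 (Bluetooth 1.2)
 * or later; older controllers do not support the command. */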
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

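/* Tear down an established link by sending HCI Disconnect with the given
 * reason code. */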
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

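/* Set up a legacy SCO link on top of an existing ACL connection using the
 * Add SCO Connection command. */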
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

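/* Set up an (e)SCO link with Setup Synchronous Connection, requesting
 * 8000 octets/s (64 kbps) in each direction and the device's current
 * voice setting. */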
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

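/* Disconnect timer callback: once the connection has no more users, either
 * cancel a still-pending connection attempt or disconnect an established
 * link. */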
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK && conn->out)
			hci_acl_connect_cancel(conn);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

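/* Idle timer callback: request sniff mode for an inactive connection. */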
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

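/* Allocate a new hci_conn for the given link type, initialise its packet
 * types and timers, and add it to the device's connection hash. */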
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

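/* Unlink a connection: stop its timers, detach any SCO/ACL peer link,
 * return unacknowledged ACL credits and remove it from the connection
 * hash. */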
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}

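/* Pick the local HCI device used to reach dst: match the given source
 * address if one is set, otherwise take the first device that is up and
 * whose address is not the destination itself. */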
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

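/* The devref count keeps the connection's sysfs device alive; the device
 * is torn down once the last holder drops its reference. */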
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

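/* ioctl helper: copy a bounded list of the active connections on the
 * requested device to userspace. */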
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

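/* ioctl helper: copy the state of a single connection, looked up by link
 * type and Bluetooth address, to userspace. */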
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

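/* ioctl helper: report the stored authentication type of the ACL connection
 * to the given Bluetooth address. */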
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}