net/bluetooth/hci_conn.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
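
/* Initiate an outgoing ACL connection: build a Create Connection command,
 * seeding page scan parameters and clock offset from the inquiry cache
 * when a fresh entry exists, then send HCI_OP_CREATE_CONN. */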
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
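
/* Cancel a not-yet-established outgoing ACL connection. Controllers older
 * than Bluetooth 1.2 (hci_ver < 2) do not implement the Create Connection
 * Cancel command, so bail out on those. */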
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
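
/* Tear down an established link by sending HCI_OP_DISCONNECT with the
 * given reason code. */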
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
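
/* Add a legacy SCO link on top of an existing ACL connection handle. */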
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
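
/* Set up an eSCO link with the Setup Synchronous Connection command,
 * requesting 64 kbit/s (0x1f40 octets/s) in each direction and using the
 * adapter's current voice setting. */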
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
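
/* Disconnect timer callback: runs once the connection is no longer
 * referenced (refcnt is zero). Cancels a pending outgoing ACL connect or
 * disconnects an established link, depending on the connection state. */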
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK && conn->out)
			hci_acl_connect_cancel(conn);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}
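
/* Idle timer callback: request sniff mode once the link has been idle for
 * the adapter's idle_timeout. */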
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}
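
/* Allocate and initialise a new hci_conn, pick suitable packet types for
 * the link type and register the connection with the device. */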
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}
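
/* Unlink, purge and release a connection and drop the reference it holds
 * on the device. */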
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}
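
/* Pick the local adapter to use for a connection to dst; see the simple
 * routing rules in the loop below. */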
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	} else {
		if (acl->sec_level < sec_level)
			acl->sec_level = sec_level;
		if (acl->auth_type < auth_type)
			acl->auth_type = auth_type;
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level > conn->sec_level)
		conn->sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}
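
/* Device reference counting: the connection's sysfs entry is removed once
 * the last devref is dropped. */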
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);
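
/* ioctl helper: copy the list of active connections on a device to
 * userspace (used by HCIGETCONNLIST). */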
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
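
/* ioctl helper: report handle, type, direction, state and link mode for a
 * single connection (used by HCIGETCONNINFO). */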
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
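
/* ioctl helper: report the auth_type of the ACL link to the given bdaddr
 * (used by HCIGETAUTHINFO). */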
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}