release/src-rt-6.x.4708/linux/linux-2.6.36/net/bluetooth/hci_conn.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
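
/* Build and send an HCI Create Connection command for an outgoing ACL
 * link, seeding page-scan parameters from the inquiry cache when a fresh
 * entry for the destination is available. */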
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
						cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
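
/* Cancel an outgoing ACL connection attempt; silently skipped on
 * controllers older than HCI version 1.2, which lack the command. */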
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
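
/* Request disconnection of an established link with the given HCI reason
 * code and mark the connection as disconnecting. */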
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
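
/* Add a legacy SCO link on top of an existing ACL connection handle. */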
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
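
/* Set up a synchronous (eSCO) link on top of an existing ACL handle,
 * using fixed default bandwidth, latency and retransmission settings. */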
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
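
/* Disconnect timer: bails out if the connection is still referenced,
 * otherwise cancels a pending connect attempt or disconnects the
 * established link, depending on the connection state. */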
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK && conn->out)
			hci_acl_connect_cancel(conn);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}
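
/* Idle timer: ask the controller to put the connection into sniff mode. */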
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}
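
/* Allocate a new connection object for the given link type, initialise
 * its packet types and timers, and register it on the device. */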
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}
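
/* Unregister and tear down a connection: detach the paired SCO/ACL link,
 * return unacknowledged ACL credits, and drop the hash, sysfs device and
 * hdev references. */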
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}
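
/* Pick a local adapter for reaching dst: match the given source address
 * when one is set, otherwise use the first powered-up adapter whose own
 * address differs from dst. */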
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	} else {
		if (acl->sec_level < sec_level)
			acl->sec_level = sec_level;
		if (acl->auth_type < auth_type)
			acl->auth_type = auth_type;
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);
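
/*
 * Typical caller pattern (illustrative sketch only; upper layers such as
 * L2CAP do roughly the following, with the device locked as required):
 *
 *	hdev = hci_get_route(dst, src);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	hci_dev_lock_bh(hdev);
 *	conn = hci_connect(hdev, ACL_LINK, dst, BT_SECURITY_LOW,
 *						HCI_AT_NO_BONDING);
 *	hci_dev_unlock_bh(hdev);
 *	hci_dev_put(hdev);
 */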

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
			!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level > conn->sec_level)
		conn->sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
			(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}
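
/* devref counts users of the connection's sysfs device; the device is
 * unregistered once the last holder calls hci_conn_put_device(). */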
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);
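
/* ioctl helper: copy a snapshot of the device's active connections out to
 * userspace, bounded by the caller-supplied conn_num. */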
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
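
/* ioctl helper: report the state of a single connection identified by
 * bdaddr and link type. */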
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
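
/* ioctl helper: report the authentication requirement recorded for an
 * ACL link to the given remote address. */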
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}