/* net/bluetooth/hci_conn.c */

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

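/* Start an outgoing LE connection: mark the connection as outgoing
   master and issue HCI_OP_LE_CREATE_CONN with fixed scan and connection
   interval parameters. */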
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = 1;
	conn->link_mode |= HCI_LM_MASTER;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = cpu_to_le16(0x0004);
	cp.scan_window = cpu_to_le16(0x0004);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.conn_interval_min = cpu_to_le16(0x0008);
	cp.conn_interval_max = cpu_to_le16(0x0100);
	cp.supervision_timeout = cpu_to_le16(0x0064);
	cp.min_ce_len = cpu_to_le16(0x0001);
	cp.max_ce_len = cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

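/* Abort a pending LE connection attempt. */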
static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

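/* Start an outgoing ACL connection. Page scan parameters and the clock
   offset are taken from the inquiry cache when a recent entry for the
   destination address is available. */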
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

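/* Abort a pending ACL connection attempt. Create Connection Cancel is
   only available on 1.2 and later controllers (hci_ver >= 2). */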
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

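/* Disconnect an established link with the given reason code. */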
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

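/* Add a legacy SCO link on top of an existing ACL connection. */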
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

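/* Set up a synchronous (SCO/eSCO) link on top of an existing ACL
   connection, using the device's current voice setting. */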
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

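/* Ask the controller to update the parameters of an LE connection. */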
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
					u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = cpu_to_le16(0x0001);
	cp.max_ce_len = cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

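/* Disconnect timer callback: once the connection is no longer
   referenced, cancel a pending connection attempt or disconnect an
   established link. */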
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

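/* Auto-accept timer callback: confirm the pending user confirmation
   request for this connection. */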
static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
								&conn->dst);

	hci_dev_unlock(hdev);
}

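/* Allocate and initialise a new connection object and add it to the
   device's connection hash. */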
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
							(unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

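/* Tear down a connection: stop its timers, return unacknowledged packet
   credits to the device and drop all references. */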
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}

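/* Pick the local HCI device to use for reaching the given destination
   address. */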
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (le)
			return ERR_PTR(-EBUSY);
		le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);
		if (le->state == BT_OPEN)
			hci_le_connect(le);

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
							&cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
			(sec_level == BT_SECURITY_MEDIUM ||
			sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
			(sec_level != BT_SECURITY_HIGH ||
			conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	hci_conn_auth(conn, sec_level, auth_type);
	return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("conn %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if a secure link is not required */

	if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
			(conn->key_type == HCI_LK_COMBINATION &&
			conn->pin_length == 16))
		return 1;

	return 0; /* Reject: the link is not secure enough */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

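/* Reference counting for the connection's sysfs device. */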
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

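/* ioctl helpers: copy connection and authentication information for
   this device to userspace. */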
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}