/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

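/* Initiate an outgoing LE connection: mark the connection as an outgoing
 * master link and send HCI_OP_LE_CREATE_CONN. The hardcoded parameters are
 * in Bluetooth spec units (0.625 ms slots for scan interval/window,
 * 1.25 ms for the connection interval, 10 ms for the supervision timeout).
 */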
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = 1;
	conn->link_mode |= HCI_LM_MASTER;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = cpu_to_le16(0x0004);
	cp.scan_window = cpu_to_le16(0x0004);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.conn_interval_min = cpu_to_le16(0x0008);
	cp.conn_interval_max = cpu_to_le16(0x0100);
	cp.supervision_timeout = cpu_to_le16(0x0064);
	cp.min_ce_len = cpu_to_le16(0x0001);
	cp.max_ce_len = cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

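/* Initiate an outgoing BR/EDR ACL connection, seeding the page-scan
 * parameters and clock offset from the inquiry cache when a fresh entry
 * for the peer is available.
 */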
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

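/* Request disconnection of an established link with the given HCI reason
 * code and move the connection to the BT_DISCONN state.
 */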
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

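/* Set up a legacy SCO link on top of an existing ACL connection using the
 * HCI Add SCO Connection command, used when the controller is not eSCO
 * capable.
 */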
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

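/* Set up a synchronous (eSCO/SCO) link via HCI Setup Synchronous Connection.
 * Latency and retransmission effort are left to the controller to choose
 * (0xffff and 0xff are the "don't care" values).
 */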
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

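/* Ask the controller to update the parameters of an existing LE connection
 * (connection interval range, slave latency and supervision timeout).
 */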
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
					u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = cpu_to_le16(0x0001);
	cp.max_ce_len = cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

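/* Disconnect timer callback: once the last reference to the connection has
 * been dropped, cancel a pending connect attempt or disconnect an
 * established link, depending on the connection state.
 */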
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

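/* Auto-accept timer callback: confirm a pending user-confirmation request
 * for the remote address without user interaction.
 */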
static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
							&conn->dst);

	hci_dev_unlock(hdev);
}

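/* Allocate and initialise a new hci_conn object, pick the packet types
 * supported for the link type, arm its timers and add it to the device's
 * connection hash.
 */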
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
							(unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

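/* Tear down a connection object: stop its timers, return any unacked
 * packet credits to the device, unlink it from an associated SCO/ACL peer
 * and remove it from the connection hash.
 */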
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

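/* Pick the local adapter to use for reaching dst: match the given source
 * address if one was supplied, otherwise take the first adapter that is up
 * and whose own address differs from the destination.
 */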
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (le)
			return ERR_PTR(-EBUSY);
		le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);
		if (le->state == BT_OPEN)
			hci_le_connect(le);

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
				!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	/* For SDP we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non-2.1 devices and a low security level we don't need the
	   link key. */
	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security levels 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
			(sec_level == BT_SECURITY_MEDIUM ||
			sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for security
	   levels 1 and 2. A high security level requires that the
	   combination key was generated using the maximum PIN code length
	   (16). For pre-2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
			(sec_level != BT_SECURITY_HIGH ||
			conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("conn %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
			(conn->key_type == HCI_LK_COMBINATION &&
			conn->pin_length == 16))
		return 1;

	return 0; /* Reject a link that is not secure */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

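/* ioctl helper (HCIGETCONNLIST): copy a bounded snapshot of one device's
 * connection hash to user space.
 */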
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

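/* ioctl helper (HCIGETCONNINFO): report state and link mode of a single
 * connection identified by link type and remote address.
 */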
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

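/* ioctl helper (HCIGETAUTHINFO): report the auth_type recorded for the ACL
 * connection to the given remote address.
 */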
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}