/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>

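/* Build and send an HCI LE Create Connection command for an outgoing
 * LE link, using the fixed scan and connection parameters set below.
 */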
static void hci_le_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = __constant_cpu_to_le16(0x0060);
	cp.scan_window = __constant_cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
	cp.max_ce_len = __constant_cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

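/* Build and send an HCI Create Connection command for an outgoing ACL
 * link, seeding page scan parameters from the inquiry cache when a
 * matching entry is available.
 */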
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  __constant_cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp.max_latency = __constant_cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = __constant_cpu_to_le16(0x0001);
	cp.max_ce_len = __constant_cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

static void hci_conn_disconnect(struct hci_conn *conn)
{
	__u8 reason = hci_proto_disconn_ind(conn);

	switch (conn->type) {
	case ACL_LINK:
		hci_acl_disconn(conn, reason);
		break;
	case AMP_LINK:
		hci_amp_disconn(conn, reason);
		break;
	}
}

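/* Delayed work run when a connection's disconnect timer expires: cancel
 * a still-pending connection attempt or disconnect an established link,
 * but only once the reference count has dropped to zero.
 */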
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_conn_disconnect(conn);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = __constant_cpu_to_le16(0);
		cp.min_remote_timeout = __constant_cpu_to_le16(0);
		cp.min_local_timeout = __constant_cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = __constant_cpu_to_le16(4);
		cp.timeout = __constant_cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

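/* Allocate and initialise a new hci_conn, register it in the connection
 * hash and take a reference on the controller.
 */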
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}

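/* Tear down a connection: stop its timers, unlink any associated SCO or
 * ACL link, return unacked packet credits to the controller and drop the
 * references taken in hci_conn_add().
 */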
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

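/* Select the local BR/EDR controller to use for reaching dst: match on
 * the given source address if one is set, otherwise pick the first
 * usable controller whose address differs from the destination.
 */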
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_RAW, &d->flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

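/* Helpers for hci_connect(): look up an existing connection of the
 * requested type, or create a new one and kick off connection setup.
 */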
static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				       u8 dst_type, u8 sec_level, u8 auth_type)
{
	struct hci_conn *le;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
		return ERR_PTR(-ENOTSUPP);

	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (!le) {
		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (le)
			return ERR_PTR(-EBUSY);

		le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);

		le->dst_type = bdaddr_to_le(dst_type);
		hci_le_create_connection(le);
	}

	le->pending_sec_level = sec_level;
	le->auth_type = auth_type;

	hci_conn_hold(le);

	return le;
}

static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
					u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}

static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
					bdaddr_t *dst, u8 sec_level,
					u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Create SCO, ACL or LE connection. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);

	switch (type) {
	case LE_LINK:
		return hci_connect_le(hdev, dst, dst_type, sec_level,
				      auth_type);
	case ACL_LINK:
		return hci_connect_acl(hdev, dst, sec_level, auth_type);
	case SCO_LINK:
	case ESCO_LINK:
		return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
	}

	return ERR_PTR(-EINVAL);
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

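/* Copy connection and authentication information for this controller to
 * user space.
 */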
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

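/* Allocate a new hci_chan for this connection and add it to the
 * connection's RCU-protected channel list.
 */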
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

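/* Remove a channel from its connection's list and free it once RCU
 * readers are done, dropping the channel's reference on the connection.
 */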
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}