Bluetooth: Move hci_get_cmd_complete() to hci_event.c
[linux-2.6/btrfs-unstable.git] / net / bluetooth / hci_event.c
blobc2483cb6ffbd89ce3d4b0167c9510f9197c5d42e
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
50 if (status)
51 return;
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
57 hci_dev_lock(hdev);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
59 hci_dev_unlock(hdev);
61 hci_conn_check_pending(hdev);
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
66 __u8 status = *((__u8 *) skb->data);
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
70 if (status)
71 return;
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 __u8 status = *((__u8 *) skb->data);
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
82 if (status)
83 return;
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
87 hci_conn_check_pending(hdev);
90 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
91 struct sk_buff *skb)
93 BT_DBG("%s", hdev->name);
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
103 if (rp->status)
104 return;
106 hci_dev_lock(hdev);
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
109 if (conn)
110 conn->role = rp->role;
112 hci_dev_unlock(hdev);
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122 if (rp->status)
123 return;
125 hci_dev_lock(hdev);
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 if (conn)
129 conn->link_policy = __le16_to_cpu(rp->policy);
131 hci_dev_unlock(hdev);
134 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 struct hci_rp_write_link_policy *rp = (void *) skb->data;
137 struct hci_conn *conn;
138 void *sent;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142 if (rp->status)
143 return;
145 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
146 if (!sent)
147 return;
149 hci_dev_lock(hdev);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 if (conn)
153 conn->link_policy = get_unaligned_le16(sent + 2);
155 hci_dev_unlock(hdev);
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 struct sk_buff *skb)
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 if (rp->status)
166 return;
168 hdev->link_policy = __le16_to_cpu(rp->policy);
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
172 struct sk_buff *skb)
174 __u8 status = *((__u8 *) skb->data);
175 void *sent;
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
179 if (status)
180 return;
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
183 if (!sent)
184 return;
186 hdev->link_policy = get_unaligned_le16(sent);
189 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 __u8 status = *((__u8 *) skb->data);
193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 clear_bit(HCI_RESET, &hdev->flags);
197 if (status)
198 return;
200 /* Reset all non-persistent flags */
201 hci_dev_clear_volatile_flags(hdev);
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
211 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
212 hdev->scan_rsp_data_len = 0;
214 hdev->le_scan_type = LE_SCAN_PASSIVE;
216 hdev->ssp_debug_mode = 0;
218 hci_bdaddr_list_clear(&hdev->le_white_list);
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
222 struct sk_buff *skb)
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
230 if (!sent)
231 return;
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
240 struct sk_buff *skb)
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
246 if (rp->status)
247 return;
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
251 else
252 hdev->stored_num_keys = 0;
255 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
257 __u8 status = *((__u8 *) skb->data);
258 void *sent;
260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
262 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
263 if (!sent)
264 return;
266 hci_dev_lock(hdev);
268 if (hci_dev_test_flag(hdev, HCI_MGMT))
269 mgmt_set_local_name_complete(hdev, sent, status);
270 else if (!status)
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
273 hci_dev_unlock(hdev);
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
282 if (rp->status)
283 return;
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
290 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 __u8 status = *((__u8 *) skb->data);
293 void *sent;
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
298 if (!sent)
299 return;
301 hci_dev_lock(hdev);
303 if (!status) {
304 __u8 param = *((__u8 *) sent);
306 if (param == AUTH_ENABLED)
307 set_bit(HCI_AUTH, &hdev->flags);
308 else
309 clear_bit(HCI_AUTH, &hdev->flags);
312 if (hci_dev_test_flag(hdev, HCI_MGMT))
313 mgmt_auth_enable_complete(hdev, status);
315 hci_dev_unlock(hdev);
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
320 __u8 status = *((__u8 *) skb->data);
321 __u8 param;
322 void *sent;
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
326 if (status)
327 return;
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
330 if (!sent)
331 return;
333 param = *((__u8 *) sent);
335 if (param)
336 set_bit(HCI_ENCRYPT, &hdev->flags);
337 else
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
341 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
344 __u8 param;
345 void *sent;
347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
350 if (!sent)
351 return;
353 param = *((__u8 *) sent);
355 hci_dev_lock(hdev);
357 if (status) {
358 hdev->discov_timeout = 0;
359 goto done;
362 if (param & SCAN_INQUIRY)
363 set_bit(HCI_ISCAN, &hdev->flags);
364 else
365 clear_bit(HCI_ISCAN, &hdev->flags);
367 if (param & SCAN_PAGE)
368 set_bit(HCI_PSCAN, &hdev->flags);
369 else
370 clear_bit(HCI_PSCAN, &hdev->flags);
372 done:
373 hci_dev_unlock(hdev);
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382 if (rp->status)
383 return;
385 memcpy(hdev->dev_class, rp->dev_class, 3);
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
394 void *sent;
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
399 if (!sent)
400 return;
402 hci_dev_lock(hdev);
404 if (status == 0)
405 memcpy(hdev->dev_class, sent, 3);
407 if (hci_dev_test_flag(hdev, HCI_MGMT))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
410 hci_dev_unlock(hdev);
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
416 __u16 setting;
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
420 if (rp->status)
421 return;
423 setting = __le16_to_cpu(rp->voice_setting);
425 if (hdev->voice_setting == setting)
426 return;
428 hdev->voice_setting = setting;
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
432 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
437 struct sk_buff *skb)
439 __u8 status = *((__u8 *) skb->data);
440 __u16 setting;
441 void *sent;
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 if (status)
446 return;
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
449 if (!sent)
450 return;
452 setting = get_unaligned_le16(sent);
454 if (hdev->voice_setting == setting)
455 return;
457 hdev->voice_setting = setting;
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
461 if (hdev->notify)
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
466 struct sk_buff *skb)
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
472 if (rp->status)
473 return;
475 hdev->num_iac = rp->num_iac;
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
480 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
482 __u8 status = *((__u8 *) skb->data);
483 struct hci_cp_write_ssp_mode *sent;
485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
487 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
488 if (!sent)
489 return;
491 hci_dev_lock(hdev);
493 if (!status) {
494 if (sent->mode)
495 hdev->features[1][0] |= LMP_HOST_SSP;
496 else
497 hdev->features[1][0] &= ~LMP_HOST_SSP;
500 if (hci_dev_test_flag(hdev, HCI_MGMT))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
502 else if (!status) {
503 if (sent->mode)
504 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
505 else
506 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
509 hci_dev_unlock(hdev);
512 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
514 u8 status = *((u8 *) skb->data);
515 struct hci_cp_write_sc_support *sent;
517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
519 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
520 if (!sent)
521 return;
523 hci_dev_lock(hdev);
525 if (!status) {
526 if (sent->support)
527 hdev->features[1][0] |= LMP_HOST_SC;
528 else
529 hdev->features[1][0] &= ~LMP_HOST_SC;
532 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
533 if (sent->support)
534 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
535 else
536 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
539 hci_dev_unlock(hdev);
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
548 if (rp->status)
549 return;
551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
562 struct sk_buff *skb)
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
568 if (rp->status)
569 return;
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
576 static void hci_cc_read_local_features(struct hci_dev *hdev,
577 struct sk_buff *skb)
579 struct hci_rp_read_local_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
583 if (rp->status)
584 return;
586 memcpy(hdev->features, rp->features, 8);
588 /* Adjust default settings according to features
589 * supported by device. */
591 if (hdev->features[0][0] & LMP_3SLOT)
592 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
594 if (hdev->features[0][0] & LMP_5SLOT)
595 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
597 if (hdev->features[0][1] & LMP_HV2) {
598 hdev->pkt_type |= (HCI_HV2);
599 hdev->esco_type |= (ESCO_HV2);
602 if (hdev->features[0][1] & LMP_HV3) {
603 hdev->pkt_type |= (HCI_HV3);
604 hdev->esco_type |= (ESCO_HV3);
607 if (lmp_esco_capable(hdev))
608 hdev->esco_type |= (ESCO_EV3);
610 if (hdev->features[0][4] & LMP_EV4)
611 hdev->esco_type |= (ESCO_EV4);
613 if (hdev->features[0][4] & LMP_EV5)
614 hdev->esco_type |= (ESCO_EV5);
616 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
617 hdev->esco_type |= (ESCO_2EV3);
619 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
620 hdev->esco_type |= (ESCO_3EV3);
622 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
623 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
627 struct sk_buff *skb)
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633 if (rp->status)
634 return;
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
644 struct sk_buff *skb)
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
650 if (rp->status)
651 return;
653 hdev->flow_ctl_mode = rp->mode;
656 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
658 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
662 if (rp->status)
663 return;
665 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
666 hdev->sco_mtu = rp->sco_mtu;
667 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
668 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
670 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
671 hdev->sco_mtu = 64;
672 hdev->sco_pkts = 8;
675 hdev->acl_cnt = hdev->acl_pkts;
676 hdev->sco_cnt = hdev->sco_pkts;
678 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
679 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
688 if (rp->status)
689 return;
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
699 struct sk_buff *skb)
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
705 if (rp->status)
706 return;
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
715 struct sk_buff *skb)
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
722 if (status)
723 return;
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
726 if (!sent)
727 return;
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
734 struct sk_buff *skb)
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
740 if (rp->status)
741 return;
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
748 struct sk_buff *skb)
750 u8 status = *((u8 *) skb->data);
751 u8 *type;
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
755 if (status)
756 return;
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
759 if (type)
760 hdev->page_scan_type = *type;
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
764 struct sk_buff *skb)
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
770 if (rp->status)
771 return;
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
777 hdev->block_cnt = hdev->num_blocks;
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
783 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
785 struct hci_rp_read_clock *rp = (void *) skb->data;
786 struct hci_cp_read_clock *cp;
787 struct hci_conn *conn;
789 BT_DBG("%s", hdev->name);
791 if (skb->len < sizeof(*rp))
792 return;
794 if (rp->status)
795 return;
797 hci_dev_lock(hdev);
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
800 if (!cp)
801 goto unlock;
803 if (cp->which == 0x00) {
804 hdev->clock = le32_to_cpu(rp->clock);
805 goto unlock;
808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
809 if (conn) {
810 conn->clock = le32_to_cpu(rp->clock);
811 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
814 unlock:
815 hci_dev_unlock(hdev);
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
819 struct sk_buff *skb)
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
825 if (rp->status)
826 goto a2mp_rsp;
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
839 a2mp_rsp:
840 a2mp_send_getinfo_rsp(hdev);
843 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
844 struct sk_buff *skb)
846 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
847 struct amp_assoc *assoc = &hdev->loc_assoc;
848 size_t rem_len, frag_len;
850 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 if (rp->status)
853 goto a2mp_rsp;
855 frag_len = skb->len - sizeof(*rp);
856 rem_len = __le16_to_cpu(rp->rem_len);
858 if (rem_len > frag_len) {
859 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
861 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
862 assoc->offset += frag_len;
864 /* Read other fragments */
865 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
867 return;
870 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
871 assoc->len = assoc->offset + rem_len;
872 assoc->offset = 0;
874 a2mp_rsp:
875 /* Send A2MP Rsp when all fragments are received */
876 a2mp_send_getampassoc_rsp(hdev, rp->status);
877 a2mp_send_create_phy_link_req(hdev, rp->status);
880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
881 struct sk_buff *skb)
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
887 if (rp->status)
888 return;
890 hdev->inq_tx_power = rp->tx_power;
893 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
895 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
896 struct hci_cp_pin_code_reply *cp;
897 struct hci_conn *conn;
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
901 hci_dev_lock(hdev);
903 if (hci_dev_test_flag(hdev, HCI_MGMT))
904 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
906 if (rp->status)
907 goto unlock;
909 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
910 if (!cp)
911 goto unlock;
913 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
914 if (conn)
915 conn->pin_length = cp->pin_len;
917 unlock:
918 hci_dev_unlock(hdev);
921 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
923 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927 hci_dev_lock(hdev);
929 if (hci_dev_test_flag(hdev, HCI_MGMT))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
931 rp->status);
933 hci_dev_unlock(hdev);
936 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
937 struct sk_buff *skb)
939 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 if (rp->status)
944 return;
946 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
947 hdev->le_pkts = rp->le_max_pkt;
949 hdev->le_cnt = hdev->le_pkts;
951 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
955 struct sk_buff *skb)
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
961 if (rp->status)
962 return;
964 memcpy(hdev->le_features, rp->features, 8);
967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
968 struct sk_buff *skb)
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 if (rp->status)
975 return;
977 hdev->adv_tx_power = rp->tx_power;
980 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
984 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
986 hci_dev_lock(hdev);
988 if (hci_dev_test_flag(hdev, HCI_MGMT))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
990 rp->status);
992 hci_dev_unlock(hdev);
995 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
996 struct sk_buff *skb)
998 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1002 hci_dev_lock(hdev);
1004 if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status);
1008 hci_dev_unlock(hdev);
1011 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1013 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1017 hci_dev_lock(hdev);
1019 if (hci_dev_test_flag(hdev, HCI_MGMT))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1021 0, rp->status);
1023 hci_dev_unlock(hdev);
1026 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1027 struct sk_buff *skb)
1029 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1031 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1033 hci_dev_lock(hdev);
1035 if (hci_dev_test_flag(hdev, HCI_MGMT))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status);
1039 hci_dev_unlock(hdev);
1042 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1049 hci_dev_lock(hdev);
1050 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1051 rp->status);
1052 hci_dev_unlock(hdev);
1055 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1056 struct sk_buff *skb)
1058 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1062 hci_dev_lock(hdev);
1063 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1064 rp->hash256, rp->rand256,
1065 rp->status);
1066 hci_dev_unlock(hdev);
1069 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1071 __u8 status = *((__u8 *) skb->data);
1072 bdaddr_t *sent;
1074 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1076 if (status)
1077 return;
1079 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1080 if (!sent)
1081 return;
1083 hci_dev_lock(hdev);
1085 bacpy(&hdev->random_addr, sent);
1087 hci_dev_unlock(hdev);
1090 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1092 __u8 *sent, status = *((__u8 *) skb->data);
1094 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1096 if (status)
1097 return;
1099 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1100 if (!sent)
1101 return;
1103 hci_dev_lock(hdev);
1105 /* If we're doing connection initiation as peripheral. Set a
1106 * timeout in case something goes wrong.
1108 if (*sent) {
1109 struct hci_conn *conn;
1111 hci_dev_set_flag(hdev, HCI_LE_ADV);
1113 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1114 if (conn)
1115 queue_delayed_work(hdev->workqueue,
1116 &conn->le_conn_timeout,
1117 conn->conn_timeout);
1118 } else {
1119 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1122 hci_dev_unlock(hdev);
1125 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1127 struct hci_cp_le_set_scan_param *cp;
1128 __u8 status = *((__u8 *) skb->data);
1130 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1132 if (status)
1133 return;
1135 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1136 if (!cp)
1137 return;
1139 hci_dev_lock(hdev);
1141 hdev->le_scan_type = cp->type;
1143 hci_dev_unlock(hdev);
1146 static bool has_pending_adv_report(struct hci_dev *hdev)
1148 struct discovery_state *d = &hdev->discovery;
1150 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1153 static void clear_pending_adv_report(struct hci_dev *hdev)
1155 struct discovery_state *d = &hdev->discovery;
1157 bacpy(&d->last_adv_addr, BDADDR_ANY);
1158 d->last_adv_data_len = 0;
1161 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1162 u8 bdaddr_type, s8 rssi, u32 flags,
1163 u8 *data, u8 len)
1165 struct discovery_state *d = &hdev->discovery;
1167 bacpy(&d->last_adv_addr, bdaddr);
1168 d->last_adv_addr_type = bdaddr_type;
1169 d->last_adv_rssi = rssi;
1170 d->last_adv_flags = flags;
1171 memcpy(d->last_adv_data, data, len);
1172 d->last_adv_data_len = len;
1175 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1176 struct sk_buff *skb)
1178 struct hci_cp_le_set_scan_enable *cp;
1179 __u8 status = *((__u8 *) skb->data);
1181 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183 if (status)
1184 return;
1186 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1187 if (!cp)
1188 return;
1190 hci_dev_lock(hdev);
1192 switch (cp->enable) {
1193 case LE_SCAN_ENABLE:
1194 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1195 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1196 clear_pending_adv_report(hdev);
1197 break;
1199 case LE_SCAN_DISABLE:
1200 /* We do this here instead of when setting DISCOVERY_STOPPED
1201 * since the latter would potentially require waiting for
1202 * inquiry to stop too.
1204 if (has_pending_adv_report(hdev)) {
1205 struct discovery_state *d = &hdev->discovery;
1207 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1208 d->last_adv_addr_type, NULL,
1209 d->last_adv_rssi, d->last_adv_flags,
1210 d->last_adv_data,
1211 d->last_adv_data_len, NULL, 0);
1214 /* Cancel this timer so that we don't try to disable scanning
1215 * when it's already disabled.
1217 cancel_delayed_work(&hdev->le_scan_disable);
1219 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1221 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1222 * interrupted scanning due to a connect request. Mark
1223 * therefore discovery as stopped. If this was not
1224 * because of a connect request advertising might have
1225 * been disabled because of active scanning, so
1226 * re-enable it again if necessary.
1228 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1229 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1230 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1231 hdev->discovery.state == DISCOVERY_FINDING)
1232 mgmt_reenable_advertising(hdev);
1234 break;
1236 default:
1237 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1238 break;
1241 hci_dev_unlock(hdev);
1244 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1245 struct sk_buff *skb)
1247 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1249 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1251 if (rp->status)
1252 return;
1254 hdev->le_white_list_size = rp->size;
1257 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1258 struct sk_buff *skb)
1260 __u8 status = *((__u8 *) skb->data);
1262 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1264 if (status)
1265 return;
1267 hci_bdaddr_list_clear(&hdev->le_white_list);
1270 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1271 struct sk_buff *skb)
1273 struct hci_cp_le_add_to_white_list *sent;
1274 __u8 status = *((__u8 *) skb->data);
1276 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1278 if (status)
1279 return;
1281 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1282 if (!sent)
1283 return;
1285 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1286 sent->bdaddr_type);
1289 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1290 struct sk_buff *skb)
1292 struct hci_cp_le_del_from_white_list *sent;
1293 __u8 status = *((__u8 *) skb->data);
1295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1297 if (status)
1298 return;
1300 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1301 if (!sent)
1302 return;
1304 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1305 sent->bdaddr_type);
1308 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1309 struct sk_buff *skb)
1311 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1313 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1315 if (rp->status)
1316 return;
1318 memcpy(hdev->le_states, rp->le_states, 8);
1321 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1322 struct sk_buff *skb)
1324 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1326 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1328 if (rp->status)
1329 return;
1331 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1332 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1335 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1336 struct sk_buff *skb)
1338 struct hci_cp_le_write_def_data_len *sent;
1339 __u8 status = *((__u8 *) skb->data);
1341 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1343 if (status)
1344 return;
1346 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1347 if (!sent)
1348 return;
1350 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1351 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1354 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1355 struct sk_buff *skb)
1357 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1359 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1361 if (rp->status)
1362 return;
1364 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1365 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1366 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1367 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1370 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1371 struct sk_buff *skb)
1373 struct hci_cp_write_le_host_supported *sent;
1374 __u8 status = *((__u8 *) skb->data);
1376 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1378 if (status)
1379 return;
1381 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1382 if (!sent)
1383 return;
1385 hci_dev_lock(hdev);
1387 if (sent->le) {
1388 hdev->features[1][0] |= LMP_HOST_LE;
1389 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1390 } else {
1391 hdev->features[1][0] &= ~LMP_HOST_LE;
1392 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1393 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1396 if (sent->simul)
1397 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1398 else
1399 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1401 hci_dev_unlock(hdev);
1404 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1406 struct hci_cp_le_set_adv_param *cp;
1407 u8 status = *((u8 *) skb->data);
1409 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1411 if (status)
1412 return;
1414 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1415 if (!cp)
1416 return;
1418 hci_dev_lock(hdev);
1419 hdev->adv_addr_type = cp->own_address_type;
1420 hci_dev_unlock(hdev);
1423 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1424 struct sk_buff *skb)
1426 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1428 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1429 hdev->name, rp->status, rp->phy_handle);
1431 if (rp->status)
1432 return;
1434 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1437 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1439 struct hci_rp_read_rssi *rp = (void *) skb->data;
1440 struct hci_conn *conn;
1442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1444 if (rp->status)
1445 return;
1447 hci_dev_lock(hdev);
1449 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1450 if (conn)
1451 conn->rssi = rp->rssi;
1453 hci_dev_unlock(hdev);
1456 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1458 struct hci_cp_read_tx_power *sent;
1459 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1460 struct hci_conn *conn;
1462 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1464 if (rp->status)
1465 return;
1467 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1468 if (!sent)
1469 return;
1471 hci_dev_lock(hdev);
1473 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1474 if (!conn)
1475 goto unlock;
1477 switch (sent->type) {
1478 case 0x00:
1479 conn->tx_power = rp->tx_power;
1480 break;
1481 case 0x01:
1482 conn->max_tx_power = rp->tx_power;
1483 break;
1486 unlock:
1487 hci_dev_unlock(hdev);
1490 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1492 u8 status = *((u8 *) skb->data);
1493 u8 *mode;
1495 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1497 if (status)
1498 return;
1500 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1501 if (mode)
1502 hdev->ssp_debug_mode = *mode;
1505 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1507 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1509 if (status) {
1510 hci_conn_check_pending(hdev);
1511 return;
1514 set_bit(HCI_INQUIRY, &hdev->flags);
1517 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1519 struct hci_cp_create_conn *cp;
1520 struct hci_conn *conn;
1522 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1524 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1525 if (!cp)
1526 return;
1528 hci_dev_lock(hdev);
1530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1532 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1534 if (status) {
1535 if (conn && conn->state == BT_CONNECT) {
1536 if (status != 0x0c || conn->attempt > 2) {
1537 conn->state = BT_CLOSED;
1538 hci_connect_cfm(conn, status);
1539 hci_conn_del(conn);
1540 } else
1541 conn->state = BT_CONNECT2;
1543 } else {
1544 if (!conn) {
1545 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1546 HCI_ROLE_MASTER);
1547 if (!conn)
1548 BT_ERR("No memory for new connection");
1552 hci_dev_unlock(hdev);
1555 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1557 struct hci_cp_add_sco *cp;
1558 struct hci_conn *acl, *sco;
1559 __u16 handle;
1561 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1563 if (!status)
1564 return;
1566 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1567 if (!cp)
1568 return;
1570 handle = __le16_to_cpu(cp->handle);
1572 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1574 hci_dev_lock(hdev);
1576 acl = hci_conn_hash_lookup_handle(hdev, handle);
1577 if (acl) {
1578 sco = acl->link;
1579 if (sco) {
1580 sco->state = BT_CLOSED;
1582 hci_connect_cfm(sco, status);
1583 hci_conn_del(sco);
1587 hci_dev_unlock(hdev);
1590 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1592 struct hci_cp_auth_requested *cp;
1593 struct hci_conn *conn;
1595 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1597 if (!status)
1598 return;
1600 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1601 if (!cp)
1602 return;
1604 hci_dev_lock(hdev);
1606 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1607 if (conn) {
1608 if (conn->state == BT_CONFIG) {
1609 hci_connect_cfm(conn, status);
1610 hci_conn_drop(conn);
1614 hci_dev_unlock(hdev);
1617 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1619 struct hci_cp_set_conn_encrypt *cp;
1620 struct hci_conn *conn;
1622 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1624 if (!status)
1625 return;
1627 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1628 if (!cp)
1629 return;
1631 hci_dev_lock(hdev);
1633 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1634 if (conn) {
1635 if (conn->state == BT_CONFIG) {
1636 hci_connect_cfm(conn, status);
1637 hci_conn_drop(conn);
1641 hci_dev_unlock(hdev);
1644 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1645 struct hci_conn *conn)
1647 if (conn->state != BT_CONFIG || !conn->out)
1648 return 0;
1650 if (conn->pending_sec_level == BT_SECURITY_SDP)
1651 return 0;
1653 /* Only request authentication for SSP connections or non-SSP
1654 * devices with sec_level MEDIUM or HIGH or if MITM protection
1655 * is requested.
1657 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1658 conn->pending_sec_level != BT_SECURITY_FIPS &&
1659 conn->pending_sec_level != BT_SECURITY_HIGH &&
1660 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1661 return 0;
1663 return 1;
1666 static int hci_resolve_name(struct hci_dev *hdev,
1667 struct inquiry_entry *e)
1669 struct hci_cp_remote_name_req cp;
1671 memset(&cp, 0, sizeof(cp));
1673 bacpy(&cp.bdaddr, &e->data.bdaddr);
1674 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1675 cp.pscan_mode = e->data.pscan_mode;
1676 cp.clock_offset = e->data.clock_offset;
1678 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1681 static bool hci_resolve_next_name(struct hci_dev *hdev)
1683 struct discovery_state *discov = &hdev->discovery;
1684 struct inquiry_entry *e;
1686 if (list_empty(&discov->resolve))
1687 return false;
1689 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1690 if (!e)
1691 return false;
1693 if (hci_resolve_name(hdev, e) == 0) {
1694 e->name_state = NAME_PENDING;
1695 return true;
1698 return false;
1701 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1702 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1704 struct discovery_state *discov = &hdev->discovery;
1705 struct inquiry_entry *e;
1707 /* Update the mgmt connected state if necessary. Be careful with
1708 * conn objects that exist but are not (yet) connected however.
1709 * Only those in BT_CONFIG or BT_CONNECTED states can be
1710 * considered connected.
1712 if (conn &&
1713 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1714 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1715 mgmt_device_connected(hdev, conn, 0, name, name_len);
1717 if (discov->state == DISCOVERY_STOPPED)
1718 return;
1720 if (discov->state == DISCOVERY_STOPPING)
1721 goto discov_complete;
1723 if (discov->state != DISCOVERY_RESOLVING)
1724 return;
1726 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1727 /* If the device was not found in a list of found devices names of which
1728 * are pending. there is no need to continue resolving a next name as it
1729 * will be done upon receiving another Remote Name Request Complete
1730 * Event */
1731 if (!e)
1732 return;
1734 list_del(&e->list);
1735 if (name) {
1736 e->name_state = NAME_KNOWN;
1737 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1738 e->data.rssi, name, name_len);
1739 } else {
1740 e->name_state = NAME_NOT_KNOWN;
1743 if (hci_resolve_next_name(hdev))
1744 return;
1746 discov_complete:
1747 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1750 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1752 struct hci_cp_remote_name_req *cp;
1753 struct hci_conn *conn;
1755 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1757 /* If successful wait for the name req complete event before
1758 * checking for the need to do authentication */
1759 if (!status)
1760 return;
1762 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1763 if (!cp)
1764 return;
1766 hci_dev_lock(hdev);
1768 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1770 if (hci_dev_test_flag(hdev, HCI_MGMT))
1771 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1773 if (!conn)
1774 goto unlock;
1776 if (!hci_outgoing_auth_needed(hdev, conn))
1777 goto unlock;
1779 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1780 struct hci_cp_auth_requested auth_cp;
1782 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1784 auth_cp.handle = __cpu_to_le16(conn->handle);
1785 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1786 sizeof(auth_cp), &auth_cp);
1789 unlock:
1790 hci_dev_unlock(hdev);
1793 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1795 struct hci_cp_read_remote_features *cp;
1796 struct hci_conn *conn;
1798 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1800 if (!status)
1801 return;
1803 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1804 if (!cp)
1805 return;
1807 hci_dev_lock(hdev);
1809 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1810 if (conn) {
1811 if (conn->state == BT_CONFIG) {
1812 hci_connect_cfm(conn, status);
1813 hci_conn_drop(conn);
1817 hci_dev_unlock(hdev);
1820 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1822 struct hci_cp_read_remote_ext_features *cp;
1823 struct hci_conn *conn;
1825 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1827 if (!status)
1828 return;
1830 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1831 if (!cp)
1832 return;
1834 hci_dev_lock(hdev);
1836 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1837 if (conn) {
1838 if (conn->state == BT_CONFIG) {
1839 hci_connect_cfm(conn, status);
1840 hci_conn_drop(conn);
1844 hci_dev_unlock(hdev);
1847 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1849 struct hci_cp_setup_sync_conn *cp;
1850 struct hci_conn *acl, *sco;
1851 __u16 handle;
1853 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1855 if (!status)
1856 return;
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1859 if (!cp)
1860 return;
1862 handle = __le16_to_cpu(cp->handle);
1864 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1866 hci_dev_lock(hdev);
1868 acl = hci_conn_hash_lookup_handle(hdev, handle);
1869 if (acl) {
1870 sco = acl->link;
1871 if (sco) {
1872 sco->state = BT_CLOSED;
1874 hci_connect_cfm(sco, status);
1875 hci_conn_del(sco);
1879 hci_dev_unlock(hdev);
1882 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1884 struct hci_cp_sniff_mode *cp;
1885 struct hci_conn *conn;
1887 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1889 if (!status)
1890 return;
1892 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1893 if (!cp)
1894 return;
1896 hci_dev_lock(hdev);
1898 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1899 if (conn) {
1900 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1902 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1903 hci_sco_setup(conn, status);
1906 hci_dev_unlock(hdev);
1909 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1911 struct hci_cp_exit_sniff_mode *cp;
1912 struct hci_conn *conn;
1914 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1916 if (!status)
1917 return;
1919 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1920 if (!cp)
1921 return;
1923 hci_dev_lock(hdev);
1925 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1926 if (conn) {
1927 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1929 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1930 hci_sco_setup(conn, status);
1933 hci_dev_unlock(hdev);
1936 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1938 struct hci_cp_disconnect *cp;
1939 struct hci_conn *conn;
1941 if (!status)
1942 return;
1944 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1945 if (!cp)
1946 return;
1948 hci_dev_lock(hdev);
1950 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1951 if (conn)
1952 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1953 conn->dst_type, status);
1955 hci_dev_unlock(hdev);
1958 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1960 struct hci_cp_create_phy_link *cp;
1962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1964 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1965 if (!cp)
1966 return;
1968 hci_dev_lock(hdev);
1970 if (status) {
1971 struct hci_conn *hcon;
1973 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1974 if (hcon)
1975 hci_conn_del(hcon);
1976 } else {
1977 amp_write_remote_assoc(hdev, cp->phy_handle);
1980 hci_dev_unlock(hdev);
1983 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1985 struct hci_cp_accept_phy_link *cp;
1987 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1989 if (status)
1990 return;
1992 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1993 if (!cp)
1994 return;
1996 amp_write_remote_assoc(hdev, cp->phy_handle);
1999 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2001 struct hci_cp_le_create_conn *cp;
2002 struct hci_conn *conn;
2004 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2006 /* All connection failure handling is taken care of by the
2007 * hci_le_conn_failed function which is triggered by the HCI
2008 * request completion callbacks used for connecting.
2010 if (status)
2011 return;
2013 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2014 if (!cp)
2015 return;
2017 hci_dev_lock(hdev);
2019 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
2020 if (!conn)
2021 goto unlock;
2023 /* Store the initiator and responder address information which
2024 * is needed for SMP. These values will not change during the
2025 * lifetime of the connection.
2027 conn->init_addr_type = cp->own_address_type;
2028 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2029 bacpy(&conn->init_addr, &hdev->random_addr);
2030 else
2031 bacpy(&conn->init_addr, &hdev->bdaddr);
2033 conn->resp_addr_type = cp->peer_addr_type;
2034 bacpy(&conn->resp_addr, &cp->peer_addr);
2036 /* We don't want the connection attempt to stick around
2037 * indefinitely since LE doesn't have a page timeout concept
2038 * like BR/EDR. Set a timer for any connection that doesn't use
2039 * the white list for connecting.
2041 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2042 queue_delayed_work(conn->hdev->workqueue,
2043 &conn->le_conn_timeout,
2044 conn->conn_timeout);
2046 unlock:
2047 hci_dev_unlock(hdev);
2050 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2052 struct hci_cp_le_start_enc *cp;
2053 struct hci_conn *conn;
2055 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2057 if (!status)
2058 return;
2060 hci_dev_lock(hdev);
2062 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2063 if (!cp)
2064 goto unlock;
2066 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2067 if (!conn)
2068 goto unlock;
2070 if (conn->state != BT_CONNECTED)
2071 goto unlock;
2073 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2074 hci_conn_drop(conn);
2076 unlock:
2077 hci_dev_unlock(hdev);
2080 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2082 struct hci_cp_switch_role *cp;
2083 struct hci_conn *conn;
2085 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2087 if (!status)
2088 return;
2090 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2091 if (!cp)
2092 return;
2094 hci_dev_lock(hdev);
2096 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2097 if (conn)
2098 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2100 hci_dev_unlock(hdev);
2103 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2105 __u8 status = *((__u8 *) skb->data);
2106 struct discovery_state *discov = &hdev->discovery;
2107 struct inquiry_entry *e;
2109 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2111 hci_conn_check_pending(hdev);
2113 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2114 return;
2116 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2117 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2119 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2120 return;
2122 hci_dev_lock(hdev);
2124 if (discov->state != DISCOVERY_FINDING)
2125 goto unlock;
2127 if (list_empty(&discov->resolve)) {
2128 /* When BR/EDR inquiry is active and no LE scanning is in
2129 * progress, then change discovery state to indicate completion.
2131 * When running LE scanning and BR/EDR inquiry simultaneously
2132 * and the LE scan already finished, then change the discovery
2133 * state to indicate completion.
2135 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2136 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2138 goto unlock;
2141 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2142 if (e && hci_resolve_name(hdev, e) == 0) {
2143 e->name_state = NAME_PENDING;
2144 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2145 } else {
2146 /* When BR/EDR inquiry is active and no LE scanning is in
2147 * progress, then change discovery state to indicate completion.
2149 * When running LE scanning and BR/EDR inquiry simultaneously
2150 * and the LE scan already finished, then change the discovery
2151 * state to indicate completion.
2153 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2154 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2155 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2158 unlock:
2159 hci_dev_unlock(hdev);
2162 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2164 struct inquiry_data data;
2165 struct inquiry_info *info = (void *) (skb->data + 1);
2166 int num_rsp = *((__u8 *) skb->data);
2168 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2170 if (!num_rsp)
2171 return;
2173 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2174 return;
2176 hci_dev_lock(hdev);
2178 for (; num_rsp; num_rsp--, info++) {
2179 u32 flags;
2181 bacpy(&data.bdaddr, &info->bdaddr);
2182 data.pscan_rep_mode = info->pscan_rep_mode;
2183 data.pscan_period_mode = info->pscan_period_mode;
2184 data.pscan_mode = info->pscan_mode;
2185 memcpy(data.dev_class, info->dev_class, 3);
2186 data.clock_offset = info->clock_offset;
2187 data.rssi = HCI_RSSI_INVALID;
2188 data.ssp_mode = 0x00;
2190 flags = hci_inquiry_cache_update(hdev, &data, false);
2192 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2193 info->dev_class, HCI_RSSI_INVALID,
2194 flags, NULL, 0, NULL, 0);
2197 hci_dev_unlock(hdev);
2200 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2202 struct hci_ev_conn_complete *ev = (void *) skb->data;
2203 struct hci_conn *conn;
2205 BT_DBG("%s", hdev->name);
2207 hci_dev_lock(hdev);
2209 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2210 if (!conn) {
2211 if (ev->link_type != SCO_LINK)
2212 goto unlock;
2214 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2215 if (!conn)
2216 goto unlock;
2218 conn->type = SCO_LINK;
2221 if (!ev->status) {
2222 conn->handle = __le16_to_cpu(ev->handle);
2224 if (conn->type == ACL_LINK) {
2225 conn->state = BT_CONFIG;
2226 hci_conn_hold(conn);
2228 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2229 !hci_find_link_key(hdev, &ev->bdaddr))
2230 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2231 else
2232 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2233 } else
2234 conn->state = BT_CONNECTED;
2236 hci_debugfs_create_conn(conn);
2237 hci_conn_add_sysfs(conn);
2239 if (test_bit(HCI_AUTH, &hdev->flags))
2240 set_bit(HCI_CONN_AUTH, &conn->flags);
2242 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2243 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2245 /* Get remote features */
2246 if (conn->type == ACL_LINK) {
2247 struct hci_cp_read_remote_features cp;
2248 cp.handle = ev->handle;
2249 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2250 sizeof(cp), &cp);
2252 hci_update_page_scan(hdev);
2255 /* Set packet type for incoming connection */
2256 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2257 struct hci_cp_change_conn_ptype cp;
2258 cp.handle = ev->handle;
2259 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2260 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2261 &cp);
2263 } else {
2264 conn->state = BT_CLOSED;
2265 if (conn->type == ACL_LINK)
2266 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2267 conn->dst_type, ev->status);
2270 if (conn->type == ACL_LINK)
2271 hci_sco_setup(conn, ev->status);
2273 if (ev->status) {
2274 hci_connect_cfm(conn, ev->status);
2275 hci_conn_del(conn);
2276 } else if (ev->link_type != ACL_LINK)
2277 hci_connect_cfm(conn, ev->status);
2279 unlock:
2280 hci_dev_unlock(hdev);
2282 hci_conn_check_pending(hdev);
2285 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2287 struct hci_cp_reject_conn_req cp;
2289 bacpy(&cp.bdaddr, bdaddr);
2290 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2291 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2294 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2296 struct hci_ev_conn_request *ev = (void *) skb->data;
2297 int mask = hdev->link_mode;
2298 struct inquiry_entry *ie;
2299 struct hci_conn *conn;
2300 __u8 flags = 0;
2302 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2303 ev->link_type);
2305 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2306 &flags);
2308 if (!(mask & HCI_LM_ACCEPT)) {
2309 hci_reject_conn(hdev, &ev->bdaddr);
2310 return;
2313 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2314 BDADDR_BREDR)) {
2315 hci_reject_conn(hdev, &ev->bdaddr);
2316 return;
2319 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2320 * connection. These features are only touched through mgmt so
2321 * only do the checks if HCI_MGMT is set.
2323 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2324 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2325 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2326 BDADDR_BREDR)) {
2327 hci_reject_conn(hdev, &ev->bdaddr);
2328 return;
2331 /* Connection accepted */
2333 hci_dev_lock(hdev);
2335 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2336 if (ie)
2337 memcpy(ie->data.dev_class, ev->dev_class, 3);
2339 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2340 &ev->bdaddr);
2341 if (!conn) {
2342 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2343 HCI_ROLE_SLAVE);
2344 if (!conn) {
2345 BT_ERR("No memory for new connection");
2346 hci_dev_unlock(hdev);
2347 return;
2351 memcpy(conn->dev_class, ev->dev_class, 3);
2353 hci_dev_unlock(hdev);
2355 if (ev->link_type == ACL_LINK ||
2356 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2357 struct hci_cp_accept_conn_req cp;
2358 conn->state = BT_CONNECT;
2360 bacpy(&cp.bdaddr, &ev->bdaddr);
2362 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2363 cp.role = 0x00; /* Become master */
2364 else
2365 cp.role = 0x01; /* Remain slave */
2367 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2368 } else if (!(flags & HCI_PROTO_DEFER)) {
2369 struct hci_cp_accept_sync_conn_req cp;
2370 conn->state = BT_CONNECT;
2372 bacpy(&cp.bdaddr, &ev->bdaddr);
2373 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2375 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2376 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2377 cp.max_latency = cpu_to_le16(0xffff);
2378 cp.content_format = cpu_to_le16(hdev->voice_setting);
2379 cp.retrans_effort = 0xff;
2381 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2382 &cp);
2383 } else {
2384 conn->state = BT_CONNECT2;
2385 hci_connect_cfm(conn, 0);
2389 static u8 hci_to_mgmt_reason(u8 err)
2391 switch (err) {
2392 case HCI_ERROR_CONNECTION_TIMEOUT:
2393 return MGMT_DEV_DISCONN_TIMEOUT;
2394 case HCI_ERROR_REMOTE_USER_TERM:
2395 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2396 case HCI_ERROR_REMOTE_POWER_OFF:
2397 return MGMT_DEV_DISCONN_REMOTE;
2398 case HCI_ERROR_LOCAL_HOST_TERM:
2399 return MGMT_DEV_DISCONN_LOCAL_HOST;
2400 default:
2401 return MGMT_DEV_DISCONN_UNKNOWN;
2405 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2407 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2408 u8 reason = hci_to_mgmt_reason(ev->reason);
2409 struct hci_conn_params *params;
2410 struct hci_conn *conn;
2411 bool mgmt_connected;
2412 u8 type;
2414 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2416 hci_dev_lock(hdev);
2418 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2419 if (!conn)
2420 goto unlock;
2422 if (ev->status) {
2423 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2424 conn->dst_type, ev->status);
2425 goto unlock;
2428 conn->state = BT_CLOSED;
2430 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2431 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2432 reason, mgmt_connected);
2434 if (conn->type == ACL_LINK) {
2435 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2436 hci_remove_link_key(hdev, &conn->dst);
2438 hci_update_page_scan(hdev);
2441 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2442 if (params) {
2443 switch (params->auto_connect) {
2444 case HCI_AUTO_CONN_LINK_LOSS:
2445 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2446 break;
2447 /* Fall through */
2449 case HCI_AUTO_CONN_DIRECT:
2450 case HCI_AUTO_CONN_ALWAYS:
2451 list_del_init(&params->action);
2452 list_add(&params->action, &hdev->pend_le_conns);
2453 hci_update_background_scan(hdev);
2454 break;
2456 default:
2457 break;
2461 type = conn->type;
2463 hci_disconn_cfm(conn, ev->reason);
2464 hci_conn_del(conn);
2466 /* Re-enable advertising if necessary, since it might
2467 * have been disabled by the connection. From the
2468 * HCI_LE_Set_Advertise_Enable command description in
2469 * the core specification (v4.0):
2470 * "The Controller shall continue advertising until the Host
2471 * issues an LE_Set_Advertise_Enable command with
2472 * Advertising_Enable set to 0x00 (Advertising is disabled)
2473 * or until a connection is created or until the Advertising
2474 * is timed out due to Directed Advertising."
2476 if (type == LE_LINK)
2477 mgmt_reenable_advertising(hdev);
2479 unlock:
2480 hci_dev_unlock(hdev);
2483 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2485 struct hci_ev_auth_complete *ev = (void *) skb->data;
2486 struct hci_conn *conn;
2488 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2490 hci_dev_lock(hdev);
2492 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2493 if (!conn)
2494 goto unlock;
2496 if (!ev->status) {
2497 if (!hci_conn_ssp_enabled(conn) &&
2498 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2499 BT_INFO("re-auth of legacy device is not possible.");
2500 } else {
2501 set_bit(HCI_CONN_AUTH, &conn->flags);
2502 conn->sec_level = conn->pending_sec_level;
2504 } else {
2505 mgmt_auth_failed(conn, ev->status);
2508 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2509 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2511 if (conn->state == BT_CONFIG) {
2512 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2513 struct hci_cp_set_conn_encrypt cp;
2514 cp.handle = ev->handle;
2515 cp.encrypt = 0x01;
2516 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2517 &cp);
2518 } else {
2519 conn->state = BT_CONNECTED;
2520 hci_connect_cfm(conn, ev->status);
2521 hci_conn_drop(conn);
2523 } else {
2524 hci_auth_cfm(conn, ev->status);
2526 hci_conn_hold(conn);
2527 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2528 hci_conn_drop(conn);
2531 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2532 if (!ev->status) {
2533 struct hci_cp_set_conn_encrypt cp;
2534 cp.handle = ev->handle;
2535 cp.encrypt = 0x01;
2536 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2537 &cp);
2538 } else {
2539 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2540 hci_encrypt_cfm(conn, ev->status, 0x00);
2544 unlock:
2545 hci_dev_unlock(hdev);
2548 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2550 struct hci_ev_remote_name *ev = (void *) skb->data;
2551 struct hci_conn *conn;
2553 BT_DBG("%s", hdev->name);
2555 hci_conn_check_pending(hdev);
2557 hci_dev_lock(hdev);
2559 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2561 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2562 goto check_auth;
2564 if (ev->status == 0)
2565 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2566 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2567 else
2568 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2570 check_auth:
2571 if (!conn)
2572 goto unlock;
2574 if (!hci_outgoing_auth_needed(hdev, conn))
2575 goto unlock;
2577 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2578 struct hci_cp_auth_requested cp;
2580 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2582 cp.handle = __cpu_to_le16(conn->handle);
2583 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2586 unlock:
2587 hci_dev_unlock(hdev);
/* Encryption Change event.
 *
 * Updates the per-connection security flags to match the controller's
 * reported encryption state, enforces Secure Connections Only policy,
 * and notifies the upper layers via connect/encrypt confirmation
 * callbacks. On failure the link is disconnected with an
 * authentication-failure reason.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2662 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2663 struct sk_buff *skb)
2665 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2666 struct hci_conn *conn;
2668 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2670 hci_dev_lock(hdev);
2672 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2673 if (conn) {
2674 if (!ev->status)
2675 set_bit(HCI_CONN_SECURE, &conn->flags);
2677 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2679 hci_key_change_cfm(conn, ev->status);
2682 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature mask and continues connection
 * setup: if both sides support extended features, request page 1;
 * otherwise resolve the remote name (or tell mgmt the device is
 * connected) and, when no outgoing authentication is needed, move the
 * connection to BT_CONNECTED.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Remaining steps only apply while connection setup is ongoing */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Command Complete event.
 *
 * Dispatches the command-specific response to the matching hci_cc_*
 * handler, then performs the common bookkeeping: cancel the command
 * timeout, replenish the command credit (unless a reset is in flight),
 * resolve any request waiting on this opcode and kick the command
 * queue if more commands are pending.
 *
 * @opcode/@status are out-parameters for the caller's event
 * accounting; @req_complete/@req_complete_skb receive the request
 * completion callbacks resolved by hci_req_cmd_complete().
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is the status */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* HCI_OP_NOP completes no real command, so leave the timer alone */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* ncmd > 0 means the controller can accept another command */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
/* Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler, then does
 * the same bookkeeping as hci_cmd_complete_evt(): cancel the command
 * timeout, replenish the command credit, flag request completion where
 * applicable and kick the command queue.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* HCI_OP_NOP completes no real command, so leave the timer alone */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* ncmd > 0 means the controller can accept another command */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3148 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3150 struct hci_ev_hardware_error *ev = (void *) skb->data;
3152 hdev->hw_error_code = ev->code;
3154 queue_work(hdev->req_workqueue, &hdev->error_reset);
3157 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3159 struct hci_ev_role_change *ev = (void *) skb->data;
3160 struct hci_conn *conn;
3162 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3164 hci_dev_lock(hdev);
3166 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3167 if (conn) {
3168 if (!ev->status)
3169 conn->role = ev->role;
3171 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3173 hci_role_switch_cfm(conn, ev->status, ev->role);
3176 hci_dev_unlock(hdev);
/* Number of Completed Packets event (packet-based flow control).
 *
 * For each reported handle, credits the completed packet count back to
 * the per-link-type transmit budget (capped at the controller-reported
 * maximum) and then kicks the TX work to send queued traffic.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer share
			 * the ACL budget.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3245 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3246 __u16 handle)
3248 struct hci_chan *chan;
3250 switch (hdev->dev_type) {
3251 case HCI_BREDR:
3252 return hci_conn_hash_lookup_handle(hdev, handle);
3253 case HCI_AMP:
3254 chan = hci_chan_lookup_handle(hdev, handle);
3255 if (chan)
3256 return chan->conn;
3257 break;
3258 default:
3259 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3260 break;
3263 return NULL;
/* Number of Completed Data Blocks event (block-based flow control).
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): credits the
 * completed block count back to the shared block budget (capped at
 * num_blocks) and kicks the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles map to a channel, hence the special lookup */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3316 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3318 struct hci_ev_mode_change *ev = (void *) skb->data;
3319 struct hci_conn *conn;
3321 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3323 hci_dev_lock(hdev);
3325 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3326 if (conn) {
3327 conn->mode = ev->mode;
3329 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3330 &conn->flags)) {
3331 if (conn->mode == HCI_CM_ACTIVE)
3332 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3333 else
3334 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3337 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3338 hci_sco_setup(conn, ev->status);
3341 hci_dev_unlock(hdev);
/* PIN Code Request event.
 *
 * Extends the disconnect timeout for the pairing, rejects the request
 * outright when the device is non-bondable and we did not initiate
 * authentication, and otherwise forwards the request to user space via
 * mgmt (flagging whether a 16-digit secure PIN is required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* hold+drop refreshes the reference so the new, longer
		 * pairing timeout takes effect.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a full 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3382 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3384 if (key_type == HCI_LK_CHANGED_COMBINATION)
3385 return;
3387 conn->pin_length = pin_len;
3388 conn->key_type = key_type;
3390 switch (key_type) {
3391 case HCI_LK_LOCAL_UNIT:
3392 case HCI_LK_REMOTE_UNIT:
3393 case HCI_LK_DEBUG_COMBINATION:
3394 return;
3395 case HCI_LK_COMBINATION:
3396 if (pin_len == 16)
3397 conn->pending_sec_level = BT_SECURITY_HIGH;
3398 else
3399 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3400 break;
3401 case HCI_LK_UNAUTH_COMBINATION_P192:
3402 case HCI_LK_UNAUTH_COMBINATION_P256:
3403 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3404 break;
3405 case HCI_LK_AUTH_COMBINATION_P192:
3406 conn->pending_sec_level = BT_SECURITY_HIGH;
3407 break;
3408 case HCI_LK_AUTH_COMBINATION_P256:
3409 conn->pending_sec_level = BT_SECURITY_FIPS;
3410 break;
/* Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength is insufficient for the security level being
 * negotiated on the current connection — in that case (or when no key
 * exists) a negative reply forces fresh pairing. Only handled when
 * mgmt is active, since that is where keys are provisioned.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key is no good when MITM protection
		 * was requested (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Short-PIN legacy keys cannot satisfy high/FIPS security */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
/* Link Key Notification event.
 *
 * Stores the newly created link key, updates the connection's security
 * bookkeeping and notifies mgmt. Debug keys are purged from the kernel
 * list unless HCI_KEEP_DEBUG_KEYS is set; non-persistent keys are
 * flagged for flushing on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 here while conn_set_key() below
	 * uses conn->pin_length — confirm passing 0 to hci_add_link_key()
	 * is intended.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* hold+drop refreshes the reference so the new timeout applies */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3534 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3536 struct hci_ev_clock_offset *ev = (void *) skb->data;
3537 struct hci_conn *conn;
3539 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3541 hci_dev_lock(hdev);
3543 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3544 if (conn && !ev->status) {
3545 struct inquiry_entry *ie;
3547 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3548 if (ie) {
3549 ie->data.clock_offset = ev->clock_offset;
3550 ie->timestamp = jiffies;
3554 hci_dev_unlock(hdev);
3557 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3559 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3560 struct hci_conn *conn;
3562 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3564 hci_dev_lock(hdev);
3566 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3567 if (conn && !ev->status)
3568 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3570 hci_dev_unlock(hdev);
3573 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3575 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3576 struct inquiry_entry *ie;
3578 BT_DBG("%s", hdev->name);
3580 hci_dev_lock(hdev);
3582 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3583 if (ie) {
3584 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3585 ie->timestamp = jiffies;
3588 hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event.
 *
 * The wire format comes in two variants (with or without a pscan_mode
 * field); the variant is detected by dividing the payload size by the
 * response count. Each response updates the inquiry cache and is
 * reported to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Per-response size identifies which struct variant was sent */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
/* Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page and, for page 1, derives the
 * peer's host SSP/SC support flags. While the connection is still in
 * BT_CONFIG it then continues setup the same way as
 * hci_remote_features_evt(): name resolution or mgmt notification,
 * followed by the transition to BT_CONNECTED when no authentication
 * is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * Finds the pending SCO/eSCO connection (falling back from SCO to an
 * eSCO entry when the controller downgraded the link type). On
 * success the handle is recorded and the connection goes live; for a
 * known set of negotiation failures on outgoing links, setup is
 * retried with a reduced packet-type mask before giving up. Note the
 * deliberate fall-through into the default (close) branch.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller may report SCO for what we set up as
		 * eSCO; retry the lookup accordingly.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			/* Retry succeeded: wait for the next event */
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3778 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3780 size_t parsed = 0;
3782 while (parsed < eir_len) {
3783 u8 field_len = eir[0];
3785 if (field_len == 0)
3786 return parsed;
3788 parsed += field_len + 1;
3789 eir += field_len + 1;
3792 return eir_len;
/* Extended Inquiry Result event.
 *
 * Each response carries inline EIR data; the inquiry cache is updated
 * (name resolution can be skipped if the EIR already contains the
 * complete name) and the device plus its EIR payload is reported to
 * mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* If the EIR already has the complete name there is no
		 * need for a separate name request.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
/* Encryption Key Refresh Complete event.
 *
 * Only acted on for LE links (BR/EDR handles this via the
 * auth-complete path). Promotes the pending security level on
 * success, tears down the link on failure after it was connected, and
 * otherwise completes the connect or auth sequence.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold+drop refreshes the reference so the new timeout
		 * applies.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Compute the authentication requirement to send in our IO Capability
 * Reply, combining the remote requirement (conn->remote_auth) with the
 * locally requested one (conn->auth_type). Bit 0 is the MITM flag.
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}
/* Determine the OOB-data-present value for an IO Capability Reply on a
 * BR/EDR link: 0x00 none, 0x01 P-192 data, 0x02 P-256 data (or the
 * stored present value when Secure Connections is enabled).
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
/* IO Capability Request event: the controller asks for our IO
 * capabilities during Secure Simple Pairing. Replies with either an IO
 * Capability Reply (pairing allowed) or a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive across the pairing exchange */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4024 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4026 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4027 struct hci_conn *conn;
4029 BT_DBG("%s", hdev->name);
4031 hci_dev_lock(hdev);
4033 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4034 if (!conn)
4035 goto unlock;
4037 conn->remote_cap = ev->capability;
4038 conn->remote_auth = ev->authentication;
4040 unlock:
4041 hci_dev_unlock(hdev);
/* User Confirmation Request event (numeric comparison during SSP).
 * Depending on the MITM requirements of both sides this either rejects
 * the request, auto-accepts it (optionally after a delay), or forwards
 * it to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4119 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4120 struct sk_buff *skb)
4122 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4124 BT_DBG("%s", hdev->name);
4126 if (hci_dev_test_flag(hdev, HCI_MGMT))
4127 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* User Passkey Notification event: record the passkey to display and
 * reset the entered-digit counter, then notify user space via mgmt.
 */
static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
/* Keypress Notification event: track remote passkey entry progress and
 * forward the updated digit count to user space via mgmt. STARTED and
 * COMPLETED return early without a notification.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
/* Simple Pairing Complete event: reset the cached remote authentication
 * requirement and report pairing failures to user space when we were
 * not the authentication initiator.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Drop the reference taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
/* Remote Host Supported Features Notification event: cache the remote
 * host features on the connection (features page 1) and update the SSP
 * mode of any matching inquiry cache entry.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
/* Remote OOB Data Request event: answer with any stored OOB data for
 * the peer. Depending on Secure Connections support this is either an
 * extended (P-192 + P-256) or a legacy (P-192 only) reply; with no
 * stored data a negative reply is sent.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode legacy P-192 values must not be used */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* AMP Physical Link Complete event: finalize an AMP physical link and
 * confirm it to the AMP manager, copying the peer address from the
 * controlling BR/EDR connection.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR connection that initiated the AMP link creation */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
/* AMP Logical Link Complete event: create the HCI channel for the new
 * logical link and, if an L2CAP channel is waiting on the AMP manager,
 * confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4374 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4375 struct sk_buff *skb)
4377 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4378 struct hci_chan *hchan;
4380 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4381 le16_to_cpu(ev->handle), ev->status);
4383 if (ev->status)
4384 return;
4386 hci_dev_lock(hdev);
4388 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4389 if (!hchan)
4390 goto unlock;
4392 amp_destroy_logical_link(hchan, ev->reason);
4394 unlock:
4395 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: on success, close and
 * delete the corresponding AMP connection object.
 */
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
/* LE Connection Complete event: set up (or fail) the LE connection,
 * resolve the peer's identity address, notify mgmt and L2CAP, and clear
 * any pending auto-connect parameters for the peer.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing outgoing attempt completed; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	hci_connect_cfm(conn, ev->status);

	/* The connection attempt triggered by this peer's pending
	 * auto-connect entry has now completed; release its reference.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
/* LE Connection Update Complete event: on success, record the new
 * connection interval, latency and supervision timeout.
 */
static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}
/* Trigger an outgoing LE connection in response to a connectable
 * advertising report, if the advertiser has a matching pending
 * auto-connect entry. Returns the new connection (with its pointer
 * stored in the params entry) or NULL.
 *
 * This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
/* Process a single LE advertising (or direct advertising) report:
 * validate directed reports, resolve the advertiser's identity address,
 * kick off pending auto-connections, and generate mgmt device-found
 * events — merging ADV_IND/ADV_SCAN_IND reports with their subsequent
 * SCAN_RSP where possible.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
/* LE Advertising Report event: iterate the variable-length list of
 * reports and hand each one to process_adv_report(). The RSSI octet
 * trails the advertising data of each report.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		/* RSSI is the single octet following the adv data */
		rssi = ev->data[ev->length];
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, NULL, 0, rssi,
				   ev->data, ev->length);

		/* Advance past header, adv data and the RSSI octet */
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
/* LE Long Term Key Request event: look up a stored LTK matching the
 * connection (and, for legacy keys, the EDiv/Rand values) and reply
 * with it, or send a negative reply if none is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4903 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4904 u8 reason)
4906 struct hci_cp_le_conn_param_req_neg_reply cp;
4908 cp.handle = cpu_to_le16(handle);
4909 cp.reason = reason;
4911 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4912 &cp);
/* LE Remote Connection Parameter Request event: validate the requested
 * parameters, store them (and notify mgmt) when we are master, and
 * accept the request — or reject it with an appropriate reason when the
 * connection is unknown or the parameters are out of range.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Remember the slave's preference if we track this peer */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the parameters as proposed */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
4973 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4974 struct sk_buff *skb)
4976 u8 num_reports = skb->data[0];
4977 void *ptr = &skb->data[1];
4979 hci_dev_lock(hdev);
4981 while (num_reports--) {
4982 struct hci_ev_le_direct_adv_info *ev = ptr;
4984 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4985 ev->bdaddr_type, &ev->direct_addr,
4986 ev->direct_addr_type, ev->rssi, NULL, 0);
4988 ptr += sizeof(*ev);
4991 hci_dev_unlock(hdev);
4994 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4996 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4998 skb_pull(skb, sizeof(*le_ev));
5000 switch (le_ev->subevent) {
5001 case HCI_EV_LE_CONN_COMPLETE:
5002 hci_le_conn_complete_evt(hdev, skb);
5003 break;
5005 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5006 hci_le_conn_update_complete_evt(hdev, skb);
5007 break;
5009 case HCI_EV_LE_ADVERTISING_REPORT:
5010 hci_le_adv_report_evt(hdev, skb);
5011 break;
5013 case HCI_EV_LE_LTK_REQ:
5014 hci_le_ltk_request_evt(hdev, skb);
5015 break;
5017 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5018 hci_le_remote_conn_param_req_evt(hdev, skb);
5019 break;
5021 case HCI_EV_LE_DIRECT_ADV_REPORT:
5022 hci_le_direct_adv_report_evt(hdev, skb);
5023 break;
5025 default:
5026 break;
5030 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5032 struct hci_ev_channel_selected *ev = (void *) skb->data;
5033 struct hci_conn *hcon;
5035 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5037 skb_pull(skb, sizeof(*ev));
5039 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5040 if (!hcon)
5041 return;
5043 amp_read_loc_assoc_final_data(hdev, hcon);
5046 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5047 u8 event, struct sk_buff *skb)
5049 struct hci_ev_cmd_complete *ev;
5050 struct hci_event_hdr *hdr;
5052 if (!skb)
5053 return false;
5055 if (skb->len < sizeof(*hdr)) {
5056 BT_ERR("Too short HCI event");
5057 return false;
5060 hdr = (void *) skb->data;
5061 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5063 if (event) {
5064 if (hdr->evt != event)
5065 return false;
5066 return true;
5069 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5070 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5071 return false;
5074 if (skb->len < sizeof(*ev)) {
5075 BT_ERR("Too short cmd_complete event");
5076 return false;
5079 ev = (void *) skb->data;
5080 skb_pull(skb, sizeof(*ev));
5082 if (opcode != __le16_to_cpu(ev->opcode)) {
5083 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5084 __le16_to_cpu(ev->opcode));
5085 return false;
5088 return true;
/* Main HCI event dispatcher.  Consumes @skb: decodes the event header,
 * routes the payload to the per-event handler, and completes any pending
 * HCI request that this event finishes.  Frees @skb before returning.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the pending command explicitly asked to be completed by this
	 * event (rather than Command Complete/Status), resolve its request
	 * callbacks up front.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Dispatch to the per-event handler; skb now points at the event
	 * parameters only.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status handlers also resolve the pending
	 * request's opcode, status and completion callbacks.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand the clone to the callback if it really is the
		 * completion event for the request; otherwise pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}