/* net/bluetooth/hci_event.c */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
35 /* Handle HCI Event packets */
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 __u8 status = *((__u8 *) skb->data);
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43 if (status) {
44 hci_dev_lock(hdev);
45 mgmt_stop_discovery_failed(hdev, status);
46 hci_dev_unlock(hdev);
47 return;
50 clear_bit(HCI_INQUIRY, &hdev->flags);
51 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
52 wake_up_bit(&hdev->flags, HCI_INQUIRY);
54 hci_dev_lock(hdev);
55 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
56 hci_dev_unlock(hdev);
58 hci_conn_check_pending(hdev);
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
63 __u8 status = *((__u8 *) skb->data);
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
67 if (status)
68 return;
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
75 __u8 status = *((__u8 *) skb->data);
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
79 if (status)
80 return;
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
84 hci_conn_check_pending(hdev);
87 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 struct sk_buff *skb)
90 BT_DBG("%s", hdev->name);
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
100 if (rp->status)
101 return;
103 hci_dev_lock(hdev);
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
113 hci_dev_unlock(hdev);
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123 if (rp->status)
124 return;
126 hci_dev_lock(hdev);
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
132 hci_dev_unlock(hdev);
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
143 if (rp->status)
144 return;
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
150 hci_dev_lock(hdev);
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
156 hci_dev_unlock(hdev);
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct sk_buff *skb)
162 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
164 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
166 if (rp->status)
167 return;
169 hdev->link_policy = __le16_to_cpu(rp->policy);
172 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 struct sk_buff *skb)
175 __u8 status = *((__u8 *) skb->data);
176 void *sent;
178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 if (!sent)
182 return;
184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent);
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
190 __u8 status = *((__u8 *) skb->data);
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
194 clear_bit(HCI_RESET, &hdev->flags);
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 __u8 status = *((__u8 *) skb->data);
210 void *sent;
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 if (!sent)
216 return;
218 hci_dev_lock(hdev);
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
222 else if (!status)
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225 hci_dev_unlock(hdev);
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234 if (rp->status)
235 return;
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
290 void *sent;
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 if (!sent)
296 return;
298 param = *((__u8 *) sent);
300 hci_dev_lock(hdev);
302 if (status) {
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
305 goto done;
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
313 if (!old_iscan)
314 mgmt_discoverable(hdev, 1);
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 to);
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
325 if (!old_pscan)
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
330 done:
331 hci_dev_unlock(hdev);
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
340 if (rp->status)
341 return;
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
360 hci_dev_lock(hdev);
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
378 if (rp->status)
379 return;
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
384 return;
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
403 if (status)
404 return;
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
410 setting = get_unaligned_le16(sent);
412 if (hdev->voice_setting == setting)
413 return;
415 hdev->voice_setting = setting;
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
423 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
425 __u8 status = *((__u8 *) skb->data);
426 struct hci_cp_write_ssp_mode *sent;
428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
430 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
431 if (!sent)
432 return;
434 if (!status) {
435 if (sent->mode)
436 hdev->features[1][0] |= LMP_HOST_SSP;
437 else
438 hdev->features[1][0] &= ~LMP_HOST_SSP;
441 if (test_bit(HCI_MGMT, &hdev->dev_flags))
442 mgmt_ssp_enable_complete(hdev, sent->mode, status);
443 else if (!status) {
444 if (sent->mode)
445 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
446 else
447 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
451 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
453 struct hci_rp_read_local_version *rp = (void *) skb->data;
455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
457 if (rp->status)
458 return;
460 hdev->hci_ver = rp->hci_ver;
461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
462 hdev->lmp_ver = rp->lmp_ver;
463 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
464 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 struct sk_buff *skb)
473 struct hci_rp_read_local_commands *rp = (void *) skb->data;
475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
477 if (!rp->status)
478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
481 static void hci_cc_read_local_features(struct hci_dev *hdev,
482 struct sk_buff *skb)
484 struct hci_rp_read_local_features *rp = (void *) skb->data;
486 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
488 if (rp->status)
489 return;
491 memcpy(hdev->features, rp->features, 8);
493 /* Adjust default settings according to features
494 * supported by device. */
496 if (hdev->features[0][0] & LMP_3SLOT)
497 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
499 if (hdev->features[0][0] & LMP_5SLOT)
500 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
502 if (hdev->features[0][1] & LMP_HV2) {
503 hdev->pkt_type |= (HCI_HV2);
504 hdev->esco_type |= (ESCO_HV2);
507 if (hdev->features[0][1] & LMP_HV3) {
508 hdev->pkt_type |= (HCI_HV3);
509 hdev->esco_type |= (ESCO_HV3);
512 if (lmp_esco_capable(hdev))
513 hdev->esco_type |= (ESCO_EV3);
515 if (hdev->features[0][4] & LMP_EV4)
516 hdev->esco_type |= (ESCO_EV4);
518 if (hdev->features[0][4] & LMP_EV5)
519 hdev->esco_type |= (ESCO_EV5);
521 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
522 hdev->esco_type |= (ESCO_2EV3);
524 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
525 hdev->esco_type |= (ESCO_3EV3);
527 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
528 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
530 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
531 hdev->features[0][0], hdev->features[0][1],
532 hdev->features[0][2], hdev->features[0][3],
533 hdev->features[0][4], hdev->features[0][5],
534 hdev->features[0][6], hdev->features[0][7]);
537 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
538 struct sk_buff *skb)
540 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
544 if (rp->status)
545 return;
547 hdev->max_page = rp->max_page;
549 if (rp->page < HCI_MAX_PAGES)
550 memcpy(hdev->features[rp->page], rp->features, 8);
553 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
554 struct sk_buff *skb)
556 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
558 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
560 if (!rp->status)
561 hdev->flow_ctl_mode = rp->mode;
564 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
566 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
568 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
570 if (rp->status)
571 return;
573 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
574 hdev->sco_mtu = rp->sco_mtu;
575 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
576 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
578 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
579 hdev->sco_mtu = 64;
580 hdev->sco_pkts = 8;
583 hdev->acl_cnt = hdev->acl_pkts;
584 hdev->sco_cnt = hdev->sco_pkts;
586 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
587 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
590 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
592 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
594 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596 if (!rp->status)
597 bacpy(&hdev->bdaddr, &rp->bdaddr);
600 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
601 struct sk_buff *skb)
603 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
605 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
608 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
609 hdev->page_scan_window = __le16_to_cpu(rp->window);
613 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
614 struct sk_buff *skb)
616 u8 status = *((u8 *) skb->data);
617 struct hci_cp_write_page_scan_activity *sent;
619 BT_DBG("%s status 0x%2.2x", hdev->name, status);
621 if (status)
622 return;
624 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
625 if (!sent)
626 return;
628 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
629 hdev->page_scan_window = __le16_to_cpu(sent->window);
632 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
633 struct sk_buff *skb)
635 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
639 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
640 hdev->page_scan_type = rp->type;
643 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
644 struct sk_buff *skb)
646 u8 status = *((u8 *) skb->data);
647 u8 *type;
649 BT_DBG("%s status 0x%2.2x", hdev->name, status);
651 if (status)
652 return;
654 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
655 if (type)
656 hdev->page_scan_type = *type;
659 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
660 struct sk_buff *skb)
662 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
664 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
666 if (rp->status)
667 return;
669 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
670 hdev->block_len = __le16_to_cpu(rp->block_len);
671 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
673 hdev->block_cnt = hdev->num_blocks;
675 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
676 hdev->block_cnt, hdev->block_len);
679 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
680 struct sk_buff *skb)
682 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
684 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686 if (rp->status)
687 goto a2mp_rsp;
689 hdev->amp_status = rp->amp_status;
690 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
691 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
692 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
693 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
694 hdev->amp_type = rp->amp_type;
695 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
696 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
697 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
698 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
700 a2mp_rsp:
701 a2mp_send_getinfo_rsp(hdev);
704 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
705 struct sk_buff *skb)
707 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
708 struct amp_assoc *assoc = &hdev->loc_assoc;
709 size_t rem_len, frag_len;
711 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
713 if (rp->status)
714 goto a2mp_rsp;
716 frag_len = skb->len - sizeof(*rp);
717 rem_len = __le16_to_cpu(rp->rem_len);
719 if (rem_len > frag_len) {
720 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
722 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
723 assoc->offset += frag_len;
725 /* Read other fragments */
726 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
728 return;
731 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
732 assoc->len = assoc->offset + rem_len;
733 assoc->offset = 0;
735 a2mp_rsp:
736 /* Send A2MP Rsp when all fragments are received */
737 a2mp_send_getampassoc_rsp(hdev, rp->status);
738 a2mp_send_create_phy_link_req(hdev, rp->status);
741 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
742 struct sk_buff *skb)
744 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
746 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
748 if (!rp->status)
749 hdev->inq_tx_power = rp->tx_power;
752 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
754 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
755 struct hci_cp_pin_code_reply *cp;
756 struct hci_conn *conn;
758 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 hci_dev_lock(hdev);
762 if (test_bit(HCI_MGMT, &hdev->dev_flags))
763 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
765 if (rp->status)
766 goto unlock;
768 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
769 if (!cp)
770 goto unlock;
772 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
773 if (conn)
774 conn->pin_length = cp->pin_len;
776 unlock:
777 hci_dev_unlock(hdev);
780 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
782 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
784 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
786 hci_dev_lock(hdev);
788 if (test_bit(HCI_MGMT, &hdev->dev_flags))
789 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
790 rp->status);
792 hci_dev_unlock(hdev);
795 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
796 struct sk_buff *skb)
798 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
800 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 if (rp->status)
803 return;
805 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
806 hdev->le_pkts = rp->le_max_pkt;
808 hdev->le_cnt = hdev->le_pkts;
810 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
813 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
814 struct sk_buff *skb)
816 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
818 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
820 if (!rp->status)
821 memcpy(hdev->le_features, rp->features, 8);
824 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
825 struct sk_buff *skb)
827 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
831 if (!rp->status)
832 hdev->adv_tx_power = rp->tx_power;
835 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
837 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
839 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
841 hci_dev_lock(hdev);
843 if (test_bit(HCI_MGMT, &hdev->dev_flags))
844 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
845 rp->status);
847 hci_dev_unlock(hdev);
850 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
851 struct sk_buff *skb)
853 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
855 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
857 hci_dev_lock(hdev);
859 if (test_bit(HCI_MGMT, &hdev->dev_flags))
860 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
861 ACL_LINK, 0, rp->status);
863 hci_dev_unlock(hdev);
866 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
868 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
870 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872 hci_dev_lock(hdev);
874 if (test_bit(HCI_MGMT, &hdev->dev_flags))
875 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
876 0, rp->status);
878 hci_dev_unlock(hdev);
881 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
882 struct sk_buff *skb)
884 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
886 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
888 hci_dev_lock(hdev);
890 if (test_bit(HCI_MGMT, &hdev->dev_flags))
891 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
892 ACL_LINK, 0, rp->status);
894 hci_dev_unlock(hdev);
897 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
898 struct sk_buff *skb)
900 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
902 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
904 hci_dev_lock(hdev);
905 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
906 rp->randomizer, rp->status);
907 hci_dev_unlock(hdev);
910 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
912 __u8 *sent, status = *((__u8 *) skb->data);
914 BT_DBG("%s status 0x%2.2x", hdev->name, status);
916 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
917 if (!sent)
918 return;
920 hci_dev_lock(hdev);
922 if (!status) {
923 if (*sent)
924 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
925 else
926 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
929 if (!test_bit(HCI_INIT, &hdev->flags)) {
930 struct hci_request req;
932 hci_req_init(&req, hdev);
933 hci_update_ad(&req);
934 hci_req_run(&req, NULL);
937 hci_dev_unlock(hdev);
940 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
942 __u8 status = *((__u8 *) skb->data);
944 BT_DBG("%s status 0x%2.2x", hdev->name, status);
946 if (status) {
947 hci_dev_lock(hdev);
948 mgmt_start_discovery_failed(hdev, status);
949 hci_dev_unlock(hdev);
950 return;
954 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
955 struct sk_buff *skb)
957 struct hci_cp_le_set_scan_enable *cp;
958 __u8 status = *((__u8 *) skb->data);
960 BT_DBG("%s status 0x%2.2x", hdev->name, status);
962 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
963 if (!cp)
964 return;
966 switch (cp->enable) {
967 case LE_SCAN_ENABLE:
968 if (status) {
969 hci_dev_lock(hdev);
970 mgmt_start_discovery_failed(hdev, status);
971 hci_dev_unlock(hdev);
972 return;
975 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
977 hci_dev_lock(hdev);
978 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
979 hci_dev_unlock(hdev);
980 break;
982 case LE_SCAN_DISABLE:
983 if (status) {
984 hci_dev_lock(hdev);
985 mgmt_stop_discovery_failed(hdev, status);
986 hci_dev_unlock(hdev);
987 return;
990 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
992 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
993 hdev->discovery.state == DISCOVERY_FINDING) {
994 mgmt_interleaved_discovery(hdev);
995 } else {
996 hci_dev_lock(hdev);
997 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
998 hci_dev_unlock(hdev);
1001 break;
1003 default:
1004 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1005 break;
1009 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1010 struct sk_buff *skb)
1012 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1014 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1016 if (!rp->status)
1017 hdev->le_white_list_size = rp->size;
1020 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1021 struct sk_buff *skb)
1023 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1025 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1027 if (!rp->status)
1028 memcpy(hdev->le_states, rp->le_states, 8);
1031 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1032 struct sk_buff *skb)
1034 struct hci_cp_write_le_host_supported *sent;
1035 __u8 status = *((__u8 *) skb->data);
1037 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1039 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1040 if (!sent)
1041 return;
1043 if (!status) {
1044 if (sent->le)
1045 hdev->features[1][0] |= LMP_HOST_LE;
1046 else
1047 hdev->features[1][0] &= ~LMP_HOST_LE;
1049 if (sent->simul)
1050 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1051 else
1052 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1055 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1056 !test_bit(HCI_INIT, &hdev->flags))
1057 mgmt_le_enable_complete(hdev, sent->le, status);
1060 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1061 struct sk_buff *skb)
1063 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1065 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1066 hdev->name, rp->status, rp->phy_handle);
1068 if (rp->status)
1069 return;
1071 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1074 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1076 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1078 if (status) {
1079 hci_conn_check_pending(hdev);
1080 hci_dev_lock(hdev);
1081 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1082 mgmt_start_discovery_failed(hdev, status);
1083 hci_dev_unlock(hdev);
1084 return;
1087 set_bit(HCI_INQUIRY, &hdev->flags);
1089 hci_dev_lock(hdev);
1090 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1091 hci_dev_unlock(hdev);
1094 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1096 struct hci_cp_create_conn *cp;
1097 struct hci_conn *conn;
1099 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1101 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1102 if (!cp)
1103 return;
1105 hci_dev_lock(hdev);
1107 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1109 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1111 if (status) {
1112 if (conn && conn->state == BT_CONNECT) {
1113 if (status != 0x0c || conn->attempt > 2) {
1114 conn->state = BT_CLOSED;
1115 hci_proto_connect_cfm(conn, status);
1116 hci_conn_del(conn);
1117 } else
1118 conn->state = BT_CONNECT2;
1120 } else {
1121 if (!conn) {
1122 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1123 if (conn) {
1124 conn->out = true;
1125 conn->link_mode |= HCI_LM_MASTER;
1126 } else
1127 BT_ERR("No memory for new connection");
1131 hci_dev_unlock(hdev);
1134 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1136 struct hci_cp_add_sco *cp;
1137 struct hci_conn *acl, *sco;
1138 __u16 handle;
1140 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 if (!status)
1143 return;
1145 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1146 if (!cp)
1147 return;
1149 handle = __le16_to_cpu(cp->handle);
1151 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1153 hci_dev_lock(hdev);
1155 acl = hci_conn_hash_lookup_handle(hdev, handle);
1156 if (acl) {
1157 sco = acl->link;
1158 if (sco) {
1159 sco->state = BT_CLOSED;
1161 hci_proto_connect_cfm(sco, status);
1162 hci_conn_del(sco);
1166 hci_dev_unlock(hdev);
1169 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1171 struct hci_cp_auth_requested *cp;
1172 struct hci_conn *conn;
1174 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1176 if (!status)
1177 return;
1179 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1180 if (!cp)
1181 return;
1183 hci_dev_lock(hdev);
1185 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1186 if (conn) {
1187 if (conn->state == BT_CONFIG) {
1188 hci_proto_connect_cfm(conn, status);
1189 hci_conn_drop(conn);
1193 hci_dev_unlock(hdev);
1196 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1198 struct hci_cp_set_conn_encrypt *cp;
1199 struct hci_conn *conn;
1201 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203 if (!status)
1204 return;
1206 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1207 if (!cp)
1208 return;
1210 hci_dev_lock(hdev);
1212 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1213 if (conn) {
1214 if (conn->state == BT_CONFIG) {
1215 hci_proto_connect_cfm(conn, status);
1216 hci_conn_drop(conn);
1220 hci_dev_unlock(hdev);
1223 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1224 struct hci_conn *conn)
1226 if (conn->state != BT_CONFIG || !conn->out)
1227 return 0;
1229 if (conn->pending_sec_level == BT_SECURITY_SDP)
1230 return 0;
1232 /* Only request authentication for SSP connections or non-SSP
1233 * devices with sec_level HIGH or if MITM protection is requested */
1234 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1235 conn->pending_sec_level != BT_SECURITY_HIGH)
1236 return 0;
1238 return 1;
1241 static int hci_resolve_name(struct hci_dev *hdev,
1242 struct inquiry_entry *e)
1244 struct hci_cp_remote_name_req cp;
1246 memset(&cp, 0, sizeof(cp));
1248 bacpy(&cp.bdaddr, &e->data.bdaddr);
1249 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1250 cp.pscan_mode = e->data.pscan_mode;
1251 cp.clock_offset = e->data.clock_offset;
1253 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1256 static bool hci_resolve_next_name(struct hci_dev *hdev)
1258 struct discovery_state *discov = &hdev->discovery;
1259 struct inquiry_entry *e;
1261 if (list_empty(&discov->resolve))
1262 return false;
1264 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1265 if (!e)
1266 return false;
1268 if (hci_resolve_name(hdev, e) == 0) {
1269 e->name_state = NAME_PENDING;
1270 return true;
1273 return false;
1276 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1277 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1279 struct discovery_state *discov = &hdev->discovery;
1280 struct inquiry_entry *e;
1282 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1283 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1284 name_len, conn->dev_class);
1286 if (discov->state == DISCOVERY_STOPPED)
1287 return;
1289 if (discov->state == DISCOVERY_STOPPING)
1290 goto discov_complete;
1292 if (discov->state != DISCOVERY_RESOLVING)
1293 return;
1295 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1296 /* If the device was not found in a list of found devices names of which
1297 * are pending. there is no need to continue resolving a next name as it
1298 * will be done upon receiving another Remote Name Request Complete
1299 * Event */
1300 if (!e)
1301 return;
1303 list_del(&e->list);
1304 if (name) {
1305 e->name_state = NAME_KNOWN;
1306 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1307 e->data.rssi, name, name_len);
1308 } else {
1309 e->name_state = NAME_NOT_KNOWN;
1312 if (hci_resolve_next_name(hdev))
1313 return;
1315 discov_complete:
1316 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1319 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1321 struct hci_cp_remote_name_req *cp;
1322 struct hci_conn *conn;
1324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1326 /* If successful wait for the name req complete event before
1327 * checking for the need to do authentication */
1328 if (!status)
1329 return;
1331 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1332 if (!cp)
1333 return;
1335 hci_dev_lock(hdev);
1337 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1339 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1340 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1342 if (!conn)
1343 goto unlock;
1345 if (!hci_outgoing_auth_needed(hdev, conn))
1346 goto unlock;
1348 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1349 struct hci_cp_auth_requested cp;
1350 cp.handle = __cpu_to_le16(conn->handle);
1351 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1354 unlock:
1355 hci_dev_unlock(hdev);
1358 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1360 struct hci_cp_read_remote_features *cp;
1361 struct hci_conn *conn;
1363 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1365 if (!status)
1366 return;
1368 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1369 if (!cp)
1370 return;
1372 hci_dev_lock(hdev);
1374 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1375 if (conn) {
1376 if (conn->state == BT_CONFIG) {
1377 hci_proto_connect_cfm(conn, status);
1378 hci_conn_drop(conn);
1382 hci_dev_unlock(hdev);
1385 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1387 struct hci_cp_read_remote_ext_features *cp;
1388 struct hci_conn *conn;
1390 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1392 if (!status)
1393 return;
1395 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1396 if (!cp)
1397 return;
1399 hci_dev_lock(hdev);
1401 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1402 if (conn) {
1403 if (conn->state == BT_CONFIG) {
1404 hci_proto_connect_cfm(conn, status);
1405 hci_conn_drop(conn);
1409 hci_dev_unlock(hdev);
1412 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1414 struct hci_cp_setup_sync_conn *cp;
1415 struct hci_conn *acl, *sco;
1416 __u16 handle;
1418 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1420 if (!status)
1421 return;
1423 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1424 if (!cp)
1425 return;
1427 handle = __le16_to_cpu(cp->handle);
1429 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1431 hci_dev_lock(hdev);
1433 acl = hci_conn_hash_lookup_handle(hdev, handle);
1434 if (acl) {
1435 sco = acl->link;
1436 if (sco) {
1437 sco->state = BT_CLOSED;
1439 hci_proto_connect_cfm(sco, status);
1440 hci_conn_del(sco);
1444 hci_dev_unlock(hdev);
1447 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1449 struct hci_cp_sniff_mode *cp;
1450 struct hci_conn *conn;
1452 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1454 if (!status)
1455 return;
1457 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1458 if (!cp)
1459 return;
1461 hci_dev_lock(hdev);
1463 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1464 if (conn) {
1465 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1467 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1468 hci_sco_setup(conn, status);
1471 hci_dev_unlock(hdev);
1474 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1476 struct hci_cp_exit_sniff_mode *cp;
1477 struct hci_conn *conn;
1479 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1481 if (!status)
1482 return;
1484 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1485 if (!cp)
1486 return;
1488 hci_dev_lock(hdev);
1490 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1491 if (conn) {
1492 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1494 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1495 hci_sco_setup(conn, status);
1498 hci_dev_unlock(hdev);
1501 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1503 struct hci_cp_disconnect *cp;
1504 struct hci_conn *conn;
1506 if (!status)
1507 return;
1509 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1510 if (!cp)
1511 return;
1513 hci_dev_lock(hdev);
1515 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1516 if (conn)
1517 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1518 conn->dst_type, status);
1520 hci_dev_unlock(hdev);
1523 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1525 struct hci_conn *conn;
1527 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1529 if (status) {
1530 hci_dev_lock(hdev);
1532 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1533 if (!conn) {
1534 hci_dev_unlock(hdev);
1535 return;
1538 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1540 conn->state = BT_CLOSED;
1541 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1542 conn->dst_type, status);
1543 hci_proto_connect_cfm(conn, status);
1544 hci_conn_del(conn);
1546 hci_dev_unlock(hdev);
1550 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1552 struct hci_cp_create_phy_link *cp;
1554 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1556 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1557 if (!cp)
1558 return;
1560 hci_dev_lock(hdev);
1562 if (status) {
1563 struct hci_conn *hcon;
1565 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1566 if (hcon)
1567 hci_conn_del(hcon);
1568 } else {
1569 amp_write_remote_assoc(hdev, cp->phy_handle);
1572 hci_dev_unlock(hdev);
1575 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1577 struct hci_cp_accept_phy_link *cp;
1579 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1581 if (status)
1582 return;
1584 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1585 if (!cp)
1586 return;
1588 amp_write_remote_assoc(hdev, cp->phy_handle);
1591 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1593 __u8 status = *((__u8 *) skb->data);
1594 struct discovery_state *discov = &hdev->discovery;
1595 struct inquiry_entry *e;
1597 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1599 hci_conn_check_pending(hdev);
1601 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1602 return;
1604 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1605 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1607 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1608 return;
1610 hci_dev_lock(hdev);
1612 if (discov->state != DISCOVERY_FINDING)
1613 goto unlock;
1615 if (list_empty(&discov->resolve)) {
1616 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1617 goto unlock;
1620 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1621 if (e && hci_resolve_name(hdev, e) == 0) {
1622 e->name_state = NAME_PENDING;
1623 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1624 } else {
1625 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1628 unlock:
1629 hci_dev_unlock(hdev);
1632 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1634 struct inquiry_data data;
1635 struct inquiry_info *info = (void *) (skb->data + 1);
1636 int num_rsp = *((__u8 *) skb->data);
1638 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1640 if (!num_rsp)
1641 return;
1643 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1644 return;
1646 hci_dev_lock(hdev);
1648 for (; num_rsp; num_rsp--, info++) {
1649 bool name_known, ssp;
1651 bacpy(&data.bdaddr, &info->bdaddr);
1652 data.pscan_rep_mode = info->pscan_rep_mode;
1653 data.pscan_period_mode = info->pscan_period_mode;
1654 data.pscan_mode = info->pscan_mode;
1655 memcpy(data.dev_class, info->dev_class, 3);
1656 data.clock_offset = info->clock_offset;
1657 data.rssi = 0x00;
1658 data.ssp_mode = 0x00;
1660 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1661 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1662 info->dev_class, 0, !name_known, ssp, NULL,
1666 hci_dev_unlock(hdev);
1669 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1671 struct hci_ev_conn_complete *ev = (void *) skb->data;
1672 struct hci_conn *conn;
1674 BT_DBG("%s", hdev->name);
1676 hci_dev_lock(hdev);
1678 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1679 if (!conn) {
1680 if (ev->link_type != SCO_LINK)
1681 goto unlock;
1683 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1684 if (!conn)
1685 goto unlock;
1687 conn->type = SCO_LINK;
1690 if (!ev->status) {
1691 conn->handle = __le16_to_cpu(ev->handle);
1693 if (conn->type == ACL_LINK) {
1694 conn->state = BT_CONFIG;
1695 hci_conn_hold(conn);
1697 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1698 !hci_find_link_key(hdev, &ev->bdaddr))
1699 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1700 else
1701 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1702 } else
1703 conn->state = BT_CONNECTED;
1705 hci_conn_add_sysfs(conn);
1707 if (test_bit(HCI_AUTH, &hdev->flags))
1708 conn->link_mode |= HCI_LM_AUTH;
1710 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1711 conn->link_mode |= HCI_LM_ENCRYPT;
1713 /* Get remote features */
1714 if (conn->type == ACL_LINK) {
1715 struct hci_cp_read_remote_features cp;
1716 cp.handle = ev->handle;
1717 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1718 sizeof(cp), &cp);
1721 /* Set packet type for incoming connection */
1722 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1723 struct hci_cp_change_conn_ptype cp;
1724 cp.handle = ev->handle;
1725 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1726 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1727 &cp);
1729 } else {
1730 conn->state = BT_CLOSED;
1731 if (conn->type == ACL_LINK)
1732 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1733 conn->dst_type, ev->status);
1736 if (conn->type == ACL_LINK)
1737 hci_sco_setup(conn, ev->status);
1739 if (ev->status) {
1740 hci_proto_connect_cfm(conn, ev->status);
1741 hci_conn_del(conn);
1742 } else if (ev->link_type != ACL_LINK)
1743 hci_proto_connect_cfm(conn, ev->status);
1745 unlock:
1746 hci_dev_unlock(hdev);
1748 hci_conn_check_pending(hdev);
1751 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1753 struct hci_ev_conn_request *ev = (void *) skb->data;
1754 int mask = hdev->link_mode;
1755 __u8 flags = 0;
1757 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1758 ev->link_type);
1760 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1761 &flags);
1763 if ((mask & HCI_LM_ACCEPT) &&
1764 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1765 /* Connection accepted */
1766 struct inquiry_entry *ie;
1767 struct hci_conn *conn;
1769 hci_dev_lock(hdev);
1771 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1772 if (ie)
1773 memcpy(ie->data.dev_class, ev->dev_class, 3);
1775 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1776 &ev->bdaddr);
1777 if (!conn) {
1778 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1779 if (!conn) {
1780 BT_ERR("No memory for new connection");
1781 hci_dev_unlock(hdev);
1782 return;
1786 memcpy(conn->dev_class, ev->dev_class, 3);
1788 hci_dev_unlock(hdev);
1790 if (ev->link_type == ACL_LINK ||
1791 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1792 struct hci_cp_accept_conn_req cp;
1793 conn->state = BT_CONNECT;
1795 bacpy(&cp.bdaddr, &ev->bdaddr);
1797 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1798 cp.role = 0x00; /* Become master */
1799 else
1800 cp.role = 0x01; /* Remain slave */
1802 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1803 &cp);
1804 } else if (!(flags & HCI_PROTO_DEFER)) {
1805 struct hci_cp_accept_sync_conn_req cp;
1806 conn->state = BT_CONNECT;
1808 bacpy(&cp.bdaddr, &ev->bdaddr);
1809 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1811 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1812 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1813 cp.max_latency = __constant_cpu_to_le16(0xffff);
1814 cp.content_format = cpu_to_le16(hdev->voice_setting);
1815 cp.retrans_effort = 0xff;
1817 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1818 sizeof(cp), &cp);
1819 } else {
1820 conn->state = BT_CONNECT2;
1821 hci_proto_connect_cfm(conn, 0);
1823 } else {
1824 /* Connection rejected */
1825 struct hci_cp_reject_conn_req cp;
1827 bacpy(&cp.bdaddr, &ev->bdaddr);
1828 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1829 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1833 static u8 hci_to_mgmt_reason(u8 err)
1835 switch (err) {
1836 case HCI_ERROR_CONNECTION_TIMEOUT:
1837 return MGMT_DEV_DISCONN_TIMEOUT;
1838 case HCI_ERROR_REMOTE_USER_TERM:
1839 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1840 case HCI_ERROR_REMOTE_POWER_OFF:
1841 return MGMT_DEV_DISCONN_REMOTE;
1842 case HCI_ERROR_LOCAL_HOST_TERM:
1843 return MGMT_DEV_DISCONN_LOCAL_HOST;
1844 default:
1845 return MGMT_DEV_DISCONN_UNKNOWN;
1849 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1851 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1852 struct hci_conn *conn;
1854 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1856 hci_dev_lock(hdev);
1858 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1859 if (!conn)
1860 goto unlock;
1862 if (ev->status == 0)
1863 conn->state = BT_CLOSED;
1865 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1866 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1867 if (ev->status) {
1868 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1869 conn->dst_type, ev->status);
1870 } else {
1871 u8 reason = hci_to_mgmt_reason(ev->reason);
1873 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1874 conn->dst_type, reason);
1878 if (ev->status == 0) {
1879 if (conn->type == ACL_LINK && conn->flush_key)
1880 hci_remove_link_key(hdev, &conn->dst);
1881 hci_proto_disconn_cfm(conn, ev->reason);
1882 hci_conn_del(conn);
1885 unlock:
1886 hci_dev_unlock(hdev);
1889 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1891 struct hci_ev_auth_complete *ev = (void *) skb->data;
1892 struct hci_conn *conn;
1894 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1896 hci_dev_lock(hdev);
1898 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1899 if (!conn)
1900 goto unlock;
1902 if (!ev->status) {
1903 if (!hci_conn_ssp_enabled(conn) &&
1904 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1905 BT_INFO("re-auth of legacy device is not possible.");
1906 } else {
1907 conn->link_mode |= HCI_LM_AUTH;
1908 conn->sec_level = conn->pending_sec_level;
1910 } else {
1911 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1912 ev->status);
1915 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1916 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1918 if (conn->state == BT_CONFIG) {
1919 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1920 struct hci_cp_set_conn_encrypt cp;
1921 cp.handle = ev->handle;
1922 cp.encrypt = 0x01;
1923 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1924 &cp);
1925 } else {
1926 conn->state = BT_CONNECTED;
1927 hci_proto_connect_cfm(conn, ev->status);
1928 hci_conn_drop(conn);
1930 } else {
1931 hci_auth_cfm(conn, ev->status);
1933 hci_conn_hold(conn);
1934 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1935 hci_conn_drop(conn);
1938 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1939 if (!ev->status) {
1940 struct hci_cp_set_conn_encrypt cp;
1941 cp.handle = ev->handle;
1942 cp.encrypt = 0x01;
1943 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1944 &cp);
1945 } else {
1946 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1947 hci_encrypt_cfm(conn, ev->status, 0x00);
1951 unlock:
1952 hci_dev_unlock(hdev);
1955 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1957 struct hci_ev_remote_name *ev = (void *) skb->data;
1958 struct hci_conn *conn;
1960 BT_DBG("%s", hdev->name);
1962 hci_conn_check_pending(hdev);
1964 hci_dev_lock(hdev);
1966 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1968 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1969 goto check_auth;
1971 if (ev->status == 0)
1972 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1973 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1974 else
1975 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1977 check_auth:
1978 if (!conn)
1979 goto unlock;
1981 if (!hci_outgoing_auth_needed(hdev, conn))
1982 goto unlock;
1984 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1985 struct hci_cp_auth_requested cp;
1986 cp.handle = __cpu_to_le16(conn->handle);
1987 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1990 unlock:
1991 hci_dev_unlock(hdev);
1994 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1996 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1997 struct hci_conn *conn;
1999 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2001 hci_dev_lock(hdev);
2003 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2004 if (conn) {
2005 if (!ev->status) {
2006 if (ev->encrypt) {
2007 /* Encryption implies authentication */
2008 conn->link_mode |= HCI_LM_AUTH;
2009 conn->link_mode |= HCI_LM_ENCRYPT;
2010 conn->sec_level = conn->pending_sec_level;
2011 } else
2012 conn->link_mode &= ~HCI_LM_ENCRYPT;
2015 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2017 if (ev->status && conn->state == BT_CONNECTED) {
2018 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2019 hci_conn_drop(conn);
2020 goto unlock;
2023 if (conn->state == BT_CONFIG) {
2024 if (!ev->status)
2025 conn->state = BT_CONNECTED;
2027 hci_proto_connect_cfm(conn, ev->status);
2028 hci_conn_drop(conn);
2029 } else
2030 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2033 unlock:
2034 hci_dev_unlock(hdev);
2037 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2038 struct sk_buff *skb)
2040 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2041 struct hci_conn *conn;
2043 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2045 hci_dev_lock(hdev);
2047 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2048 if (conn) {
2049 if (!ev->status)
2050 conn->link_mode |= HCI_LM_SECURE;
2052 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2054 hci_key_change_cfm(conn, ev->status);
2057 hci_dev_unlock(hdev);
2060 static void hci_remote_features_evt(struct hci_dev *hdev,
2061 struct sk_buff *skb)
2063 struct hci_ev_remote_features *ev = (void *) skb->data;
2064 struct hci_conn *conn;
2066 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2068 hci_dev_lock(hdev);
2070 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2071 if (!conn)
2072 goto unlock;
2074 if (!ev->status)
2075 memcpy(conn->features[0], ev->features, 8);
2077 if (conn->state != BT_CONFIG)
2078 goto unlock;
2080 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2081 struct hci_cp_read_remote_ext_features cp;
2082 cp.handle = ev->handle;
2083 cp.page = 0x01;
2084 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2085 sizeof(cp), &cp);
2086 goto unlock;
2089 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2090 struct hci_cp_remote_name_req cp;
2091 memset(&cp, 0, sizeof(cp));
2092 bacpy(&cp.bdaddr, &conn->dst);
2093 cp.pscan_rep_mode = 0x02;
2094 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2095 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2096 mgmt_device_connected(hdev, &conn->dst, conn->type,
2097 conn->dst_type, 0, NULL, 0,
2098 conn->dev_class);
2100 if (!hci_outgoing_auth_needed(hdev, conn)) {
2101 conn->state = BT_CONNECTED;
2102 hci_proto_connect_cfm(conn, ev->status);
2103 hci_conn_drop(conn);
2106 unlock:
2107 hci_dev_unlock(hdev);
2110 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2112 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2113 u8 status = skb->data[sizeof(*ev)];
2114 __u16 opcode;
2116 skb_pull(skb, sizeof(*ev));
2118 opcode = __le16_to_cpu(ev->opcode);
2120 switch (opcode) {
2121 case HCI_OP_INQUIRY_CANCEL:
2122 hci_cc_inquiry_cancel(hdev, skb);
2123 break;
2125 case HCI_OP_PERIODIC_INQ:
2126 hci_cc_periodic_inq(hdev, skb);
2127 break;
2129 case HCI_OP_EXIT_PERIODIC_INQ:
2130 hci_cc_exit_periodic_inq(hdev, skb);
2131 break;
2133 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2134 hci_cc_remote_name_req_cancel(hdev, skb);
2135 break;
2137 case HCI_OP_ROLE_DISCOVERY:
2138 hci_cc_role_discovery(hdev, skb);
2139 break;
2141 case HCI_OP_READ_LINK_POLICY:
2142 hci_cc_read_link_policy(hdev, skb);
2143 break;
2145 case HCI_OP_WRITE_LINK_POLICY:
2146 hci_cc_write_link_policy(hdev, skb);
2147 break;
2149 case HCI_OP_READ_DEF_LINK_POLICY:
2150 hci_cc_read_def_link_policy(hdev, skb);
2151 break;
2153 case HCI_OP_WRITE_DEF_LINK_POLICY:
2154 hci_cc_write_def_link_policy(hdev, skb);
2155 break;
2157 case HCI_OP_RESET:
2158 hci_cc_reset(hdev, skb);
2159 break;
2161 case HCI_OP_WRITE_LOCAL_NAME:
2162 hci_cc_write_local_name(hdev, skb);
2163 break;
2165 case HCI_OP_READ_LOCAL_NAME:
2166 hci_cc_read_local_name(hdev, skb);
2167 break;
2169 case HCI_OP_WRITE_AUTH_ENABLE:
2170 hci_cc_write_auth_enable(hdev, skb);
2171 break;
2173 case HCI_OP_WRITE_ENCRYPT_MODE:
2174 hci_cc_write_encrypt_mode(hdev, skb);
2175 break;
2177 case HCI_OP_WRITE_SCAN_ENABLE:
2178 hci_cc_write_scan_enable(hdev, skb);
2179 break;
2181 case HCI_OP_READ_CLASS_OF_DEV:
2182 hci_cc_read_class_of_dev(hdev, skb);
2183 break;
2185 case HCI_OP_WRITE_CLASS_OF_DEV:
2186 hci_cc_write_class_of_dev(hdev, skb);
2187 break;
2189 case HCI_OP_READ_VOICE_SETTING:
2190 hci_cc_read_voice_setting(hdev, skb);
2191 break;
2193 case HCI_OP_WRITE_VOICE_SETTING:
2194 hci_cc_write_voice_setting(hdev, skb);
2195 break;
2197 case HCI_OP_WRITE_SSP_MODE:
2198 hci_cc_write_ssp_mode(hdev, skb);
2199 break;
2201 case HCI_OP_READ_LOCAL_VERSION:
2202 hci_cc_read_local_version(hdev, skb);
2203 break;
2205 case HCI_OP_READ_LOCAL_COMMANDS:
2206 hci_cc_read_local_commands(hdev, skb);
2207 break;
2209 case HCI_OP_READ_LOCAL_FEATURES:
2210 hci_cc_read_local_features(hdev, skb);
2211 break;
2213 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2214 hci_cc_read_local_ext_features(hdev, skb);
2215 break;
2217 case HCI_OP_READ_BUFFER_SIZE:
2218 hci_cc_read_buffer_size(hdev, skb);
2219 break;
2221 case HCI_OP_READ_BD_ADDR:
2222 hci_cc_read_bd_addr(hdev, skb);
2223 break;
2225 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2226 hci_cc_read_page_scan_activity(hdev, skb);
2227 break;
2229 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2230 hci_cc_write_page_scan_activity(hdev, skb);
2231 break;
2233 case HCI_OP_READ_PAGE_SCAN_TYPE:
2234 hci_cc_read_page_scan_type(hdev, skb);
2235 break;
2237 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2238 hci_cc_write_page_scan_type(hdev, skb);
2239 break;
2241 case HCI_OP_READ_DATA_BLOCK_SIZE:
2242 hci_cc_read_data_block_size(hdev, skb);
2243 break;
2245 case HCI_OP_READ_FLOW_CONTROL_MODE:
2246 hci_cc_read_flow_control_mode(hdev, skb);
2247 break;
2249 case HCI_OP_READ_LOCAL_AMP_INFO:
2250 hci_cc_read_local_amp_info(hdev, skb);
2251 break;
2253 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2254 hci_cc_read_local_amp_assoc(hdev, skb);
2255 break;
2257 case HCI_OP_READ_INQ_RSP_TX_POWER:
2258 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2259 break;
2261 case HCI_OP_PIN_CODE_REPLY:
2262 hci_cc_pin_code_reply(hdev, skb);
2263 break;
2265 case HCI_OP_PIN_CODE_NEG_REPLY:
2266 hci_cc_pin_code_neg_reply(hdev, skb);
2267 break;
2269 case HCI_OP_READ_LOCAL_OOB_DATA:
2270 hci_cc_read_local_oob_data_reply(hdev, skb);
2271 break;
2273 case HCI_OP_LE_READ_BUFFER_SIZE:
2274 hci_cc_le_read_buffer_size(hdev, skb);
2275 break;
2277 case HCI_OP_LE_READ_LOCAL_FEATURES:
2278 hci_cc_le_read_local_features(hdev, skb);
2279 break;
2281 case HCI_OP_LE_READ_ADV_TX_POWER:
2282 hci_cc_le_read_adv_tx_power(hdev, skb);
2283 break;
2285 case HCI_OP_USER_CONFIRM_REPLY:
2286 hci_cc_user_confirm_reply(hdev, skb);
2287 break;
2289 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2290 hci_cc_user_confirm_neg_reply(hdev, skb);
2291 break;
2293 case HCI_OP_USER_PASSKEY_REPLY:
2294 hci_cc_user_passkey_reply(hdev, skb);
2295 break;
2297 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2298 hci_cc_user_passkey_neg_reply(hdev, skb);
2299 break;
2301 case HCI_OP_LE_SET_SCAN_PARAM:
2302 hci_cc_le_set_scan_param(hdev, skb);
2303 break;
2305 case HCI_OP_LE_SET_ADV_ENABLE:
2306 hci_cc_le_set_adv_enable(hdev, skb);
2307 break;
2309 case HCI_OP_LE_SET_SCAN_ENABLE:
2310 hci_cc_le_set_scan_enable(hdev, skb);
2311 break;
2313 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2314 hci_cc_le_read_white_list_size(hdev, skb);
2315 break;
2317 case HCI_OP_LE_READ_SUPPORTED_STATES:
2318 hci_cc_le_read_supported_states(hdev, skb);
2319 break;
2321 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2322 hci_cc_write_le_host_supported(hdev, skb);
2323 break;
2325 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2326 hci_cc_write_remote_amp_assoc(hdev, skb);
2327 break;
2329 default:
2330 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2331 break;
2334 if (opcode != HCI_OP_NOP)
2335 del_timer(&hdev->cmd_timer);
2337 hci_req_cmd_complete(hdev, opcode, status);
2339 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2340 atomic_set(&hdev->cmd_cnt, 1);
2341 if (!skb_queue_empty(&hdev->cmd_q))
2342 queue_work(hdev->workqueue, &hdev->cmd_work);
2346 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2348 struct hci_ev_cmd_status *ev = (void *) skb->data;
2349 __u16 opcode;
2351 skb_pull(skb, sizeof(*ev));
2353 opcode = __le16_to_cpu(ev->opcode);
2355 switch (opcode) {
2356 case HCI_OP_INQUIRY:
2357 hci_cs_inquiry(hdev, ev->status);
2358 break;
2360 case HCI_OP_CREATE_CONN:
2361 hci_cs_create_conn(hdev, ev->status);
2362 break;
2364 case HCI_OP_ADD_SCO:
2365 hci_cs_add_sco(hdev, ev->status);
2366 break;
2368 case HCI_OP_AUTH_REQUESTED:
2369 hci_cs_auth_requested(hdev, ev->status);
2370 break;
2372 case HCI_OP_SET_CONN_ENCRYPT:
2373 hci_cs_set_conn_encrypt(hdev, ev->status);
2374 break;
2376 case HCI_OP_REMOTE_NAME_REQ:
2377 hci_cs_remote_name_req(hdev, ev->status);
2378 break;
2380 case HCI_OP_READ_REMOTE_FEATURES:
2381 hci_cs_read_remote_features(hdev, ev->status);
2382 break;
2384 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2385 hci_cs_read_remote_ext_features(hdev, ev->status);
2386 break;
2388 case HCI_OP_SETUP_SYNC_CONN:
2389 hci_cs_setup_sync_conn(hdev, ev->status);
2390 break;
2392 case HCI_OP_SNIFF_MODE:
2393 hci_cs_sniff_mode(hdev, ev->status);
2394 break;
2396 case HCI_OP_EXIT_SNIFF_MODE:
2397 hci_cs_exit_sniff_mode(hdev, ev->status);
2398 break;
2400 case HCI_OP_DISCONNECT:
2401 hci_cs_disconnect(hdev, ev->status);
2402 break;
2404 case HCI_OP_LE_CREATE_CONN:
2405 hci_cs_le_create_conn(hdev, ev->status);
2406 break;
2408 case HCI_OP_CREATE_PHY_LINK:
2409 hci_cs_create_phylink(hdev, ev->status);
2410 break;
2412 case HCI_OP_ACCEPT_PHY_LINK:
2413 hci_cs_accept_phylink(hdev, ev->status);
2414 break;
2416 default:
2417 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2418 break;
2421 if (opcode != HCI_OP_NOP)
2422 del_timer(&hdev->cmd_timer);
2424 if (ev->status ||
2425 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2426 hci_req_cmd_complete(hdev, opcode, ev->status);
2428 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2429 atomic_set(&hdev->cmd_cnt, 1);
2430 if (!skb_queue_empty(&hdev->cmd_q))
2431 queue_work(hdev->workqueue, &hdev->cmd_work);
2435 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2437 struct hci_ev_role_change *ev = (void *) skb->data;
2438 struct hci_conn *conn;
2440 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2442 hci_dev_lock(hdev);
2444 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2445 if (conn) {
2446 if (!ev->status) {
2447 if (ev->role)
2448 conn->link_mode &= ~HCI_LM_MASTER;
2449 else
2450 conn->link_mode |= HCI_LM_MASTER;
2453 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2455 hci_role_switch_cfm(conn, ev->status, ev->role);
2458 hci_dev_unlock(hdev);
2461 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2463 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2464 int i;
2466 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2467 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2468 return;
2471 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2472 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2473 BT_DBG("%s bad parameters", hdev->name);
2474 return;
2477 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2479 for (i = 0; i < ev->num_hndl; i++) {
2480 struct hci_comp_pkts_info *info = &ev->handles[i];
2481 struct hci_conn *conn;
2482 __u16 handle, count;
2484 handle = __le16_to_cpu(info->handle);
2485 count = __le16_to_cpu(info->count);
2487 conn = hci_conn_hash_lookup_handle(hdev, handle);
2488 if (!conn)
2489 continue;
2491 conn->sent -= count;
2493 switch (conn->type) {
2494 case ACL_LINK:
2495 hdev->acl_cnt += count;
2496 if (hdev->acl_cnt > hdev->acl_pkts)
2497 hdev->acl_cnt = hdev->acl_pkts;
2498 break;
2500 case LE_LINK:
2501 if (hdev->le_pkts) {
2502 hdev->le_cnt += count;
2503 if (hdev->le_cnt > hdev->le_pkts)
2504 hdev->le_cnt = hdev->le_pkts;
2505 } else {
2506 hdev->acl_cnt += count;
2507 if (hdev->acl_cnt > hdev->acl_pkts)
2508 hdev->acl_cnt = hdev->acl_pkts;
2510 break;
2512 case SCO_LINK:
2513 hdev->sco_cnt += count;
2514 if (hdev->sco_cnt > hdev->sco_pkts)
2515 hdev->sco_cnt = hdev->sco_pkts;
2516 break;
2518 default:
2519 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2520 break;
2524 queue_work(hdev->workqueue, &hdev->tx_work);
2527 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2528 __u16 handle)
2530 struct hci_chan *chan;
2532 switch (hdev->dev_type) {
2533 case HCI_BREDR:
2534 return hci_conn_hash_lookup_handle(hdev, handle);
2535 case HCI_AMP:
2536 chan = hci_chan_lookup_handle(hdev, handle);
2537 if (chan)
2538 return chan->conn;
2539 break;
2540 default:
2541 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2542 break;
2545 return NULL;
/* Number Of Completed Data Blocks event (block-based flow control).
 *
 * Returns transmit credits to the host: for every reported handle the
 * per-connection in-flight count is decremented and the shared block
 * pool is replenished (clamped to the controller's advertised total).
 * Finally the TX work is kicked so queued packets can be sent.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful when the controller uses
	 * block-based flow control; ignore it otherwise. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb holds the fixed header plus one
	 * hci_comp_blocks_info entry per reported handle. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handles may be connection handles (BR/EDR) or logical
		 * link handles (AMP); skip any we cannot resolve. */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Return credits to the shared pool, never
			 * exceeding the controller's total. */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Credits became available: schedule the transmit worker. */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Mode Change event: a connection entered or left sniff/hold mode.
 *
 * Updates the cached mode and interval.  The power-save flag is only
 * touched when the change was remotely initiated (no local mode-change
 * pending).  A deferred SCO setup waiting on this mode change is
 * resumed at the end.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only track power-save state for changes we did not
		 * request ourselves. */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup was parked until the ACL left sniff mode. */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
/* PIN Code Request event: the controller asks for a legacy PIN.
 *
 * For an established link the disconnect timeout is stretched to the
 * pairing timeout.  If the adapter is not pairable the request is
 * rejected; otherwise, when mgmt is in use, the request is forwarded
 * to user space (flagging whether a 16-digit secure PIN is required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* hold/drop pair refreshes the disconnect timer with the
		 * longer pairing timeout without leaking a reference. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a full 16-digit PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Link Key Request event: the controller asks for a stored link key.
 *
 * Looks the key up in the host's store and replies with it, unless
 * policy forbids its use (debug keys when debug keys are disabled,
 * unauthenticated keys when MITM protection was requested, or short
 * PIN combination keys for high-security connections) — in those
 * cases a negative reply is sent so pairing restarts.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Host-side key storage disabled: let the controller handle it. */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly enabled. */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a request that
		 * asked for MITM protection (auth_type bit 0). */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy combination keys from short PINs are too weak
		 * for high security. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
/* Link Key Notification event: a new link key was created by pairing.
 *
 * Refreshes the connection's disconnect timer, records the key type
 * on the connection and, when host key storage is enabled, persists
 * the key.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A "changed combination" key keeps the original type. */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* NOTE(review): conn may be NULL here; presumably
	 * hci_add_link_key tolerates that — confirm in hci_core. */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2756 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2758 struct hci_ev_clock_offset *ev = (void *) skb->data;
2759 struct hci_conn *conn;
2761 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2763 hci_dev_lock(hdev);
2765 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2766 if (conn && !ev->status) {
2767 struct inquiry_entry *ie;
2769 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2770 if (ie) {
2771 ie->data.clock_offset = ev->clock_offset;
2772 ie->timestamp = jiffies;
2776 hci_dev_unlock(hdev);
2779 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2781 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2782 struct hci_conn *conn;
2784 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2786 hci_dev_lock(hdev);
2788 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2789 if (conn && !ev->status)
2790 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2792 hci_dev_unlock(hdev);
2795 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2797 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2798 struct inquiry_entry *ie;
2800 BT_DBG("%s", hdev->name);
2802 hci_dev_lock(hdev);
2804 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2805 if (ie) {
2806 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2807 ie->timestamp = jiffies;
2810 hci_dev_unlock(hdev);
/* Inquiry Result With RSSI event.
 *
 * Two wire formats exist for this event: the standard one and a
 * variant that additionally carries the page-scan mode.  The format
 * is detected from the per-response record size.  Each response is
 * fed into the inquiry cache and reported to the management
 * interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded to user space. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size not matching the standard struct means the
	 * pscan_mode variant is in use. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
/* Read Remote Extended Features Complete event.
 *
 * Caches the requested feature page.  Page 1 carries the remote host
 * features, from which the per-connection SSP-enabled flag is derived.
 * While the connection is still in BT_CONFIG this event also drives
 * the setup state machine: name resolution, mgmt notification and the
 * final transition to BT_CONNECTED.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}
	}

	/* The rest of the handler only applies during connection setup. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication pending: setup is complete. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * If the connection was requested as eSCO but the controller reports
 * it under a different link type, the pending eSCO entry is reused
 * and downgraded to SCO.  On a set of known negotiation failures a
 * single retry is attempted with relaxed packet types before giving
 * up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The request was eSCO but the result came back as SCO:
		 * find the pending eSCO entry and convert it. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once with the controller's supported eSCO/EDR
		 * packet types before declaring failure. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
/* Extended Inquiry Result event.
 *
 * Parses each EIR response, determines from the EIR payload whether
 * the device name is already known (so a remote name request can be
 * skipped), updates the inquiry cache and notifies the management
 * interface with the raw EIR data.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded to user space. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* EIR implies SSP support on the remote side. */
		data.ssp_mode = 0x01;

		/* If the EIR already contains the complete name there is
		 * no need for a separate remote name request. */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
/* Encryption Key Refresh Complete event.
 *
 * On success the pending security level becomes effective.  A failed
 * refresh on an established link tears the connection down; during
 * setup (BT_CONFIG) the result is propagated through the connect
 * confirmation, otherwise through the authentication confirmation.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed key refresh on a live link is fatal. */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timer without leaking a ref. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3083 static u8 hci_get_auth_req(struct hci_conn *conn)
3085 /* If remote requests dedicated bonding follow that lead */
3086 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3087 /* If both remote and local IO capabilities allow MITM
3088 * protection then require it, otherwise don't */
3089 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3090 return 0x02;
3091 else
3092 return 0x03;
3095 /* If remote requests no-bonding follow that lead */
3096 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3097 return conn->remote_auth | (conn->auth_type & 0x01);
3099 return conn->auth_type;
/* IO Capability Request event: SSP pairing has started.
 *
 * Replies with our IO capability when the adapter is pairable or the
 * remote side only wants no-bonding; otherwise rejects the pairing.
 * Requires mgmt to be in use, since user interaction may follow.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing. */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when pairable, or when the remote side requests
	 * no-bonding (auth requirement with bonding bits masked off). */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we actually have it and
		 * either initiated the connection or the remote side
		 * indicated OOB as well. */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3154 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3156 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3157 struct hci_conn *conn;
3159 BT_DBG("%s", hdev->name);
3161 hci_dev_lock(hdev);
3163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3164 if (!conn)
3165 goto unlock;
3167 conn->remote_cap = ev->capability;
3168 conn->remote_auth = ev->authentication;
3169 if (ev->oob_data)
3170 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3172 unlock:
3173 hci_dev_unlock(hdev);
/* User Confirmation Request event (SSP numeric comparison).
 *
 * Decides between rejecting, auto-accepting (immediately or after a
 * configurable delay) and forwarding the confirmation to user space,
 * based on the MITM requirements of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept via a timer. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3244 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3245 struct sk_buff *skb)
3247 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3249 BT_DBG("%s", hdev->name);
3251 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3252 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3255 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3256 struct sk_buff *skb)
3258 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3259 struct hci_conn *conn;
3261 BT_DBG("%s", hdev->name);
3263 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3264 if (!conn)
3265 return;
3267 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3268 conn->passkey_entered = 0;
3270 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3271 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3272 conn->dst_type, conn->passkey_notify,
3273 conn->passkey_entered);
/* Keypress Notification event: track remote passkey entry progress.
 *
 * Maintains a per-connection count of entered digits and forwards the
 * updated count to user space.  STARTED and COMPLETED return early:
 * STARTED only resets the counter and COMPLETED carries no new digit
 * information.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
/* Simple Pairing Complete event.
 *
 * Reports pairing failures to user space and releases the reference
 * taken when pairing started (see hci_io_capa_request_evt).
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Balances the hci_conn_hold() done at pairing start. */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3343 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3344 struct sk_buff *skb)
3346 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3347 struct inquiry_entry *ie;
3348 struct hci_conn *conn;
3350 BT_DBG("%s", hdev->name);
3352 hci_dev_lock(hdev);
3354 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3355 if (conn)
3356 memcpy(conn->features[1], ev->features, 8);
3358 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3359 if (ie)
3360 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3362 hci_dev_unlock(hdev);
/* Remote OOB Data Request event.
 *
 * Replies with the stored out-of-band hash/randomizer for the peer if
 * available, otherwise sends a negative reply so pairing continues
 * without OOB data.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Physical Link Complete event (AMP).
 *
 * On success, copies the peer address from the underlying BR/EDR
 * connection, marks the AMP link connected and informs the AMP
 * manager.  On failure the placeholder connection is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link's peer is the same device as the BR/EDR link
	 * tracked by the AMP manager's L2CAP connection. */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* hold/drop pair refreshes the disconnect timer. */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
/* Logical Link Complete event (AMP).
 *
 * Creates the hci_chan representing the new logical link and, if an
 * L2CAP channel is waiting on the AMP manager, completes the logical
 * link confirmation with the device's block MTU.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Block-based controllers report their MTU in blocks. */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
/* Disconnection Logical Link Complete event (AMP).
 *
 * On success, looks up the logical channel and lets the AMP layer
 * destroy it with the reported reason.
 */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	/* A failed disconnect leaves the link in place. */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}
3501 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3502 struct sk_buff *skb)
3504 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3505 struct hci_conn *hcon;
3507 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3509 if (ev->status)
3510 return;
3512 hci_dev_lock(hdev);
3514 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3515 if (hcon) {
3516 hcon->state = BT_CLOSED;
3517 hci_conn_del(hcon);
3520 hci_dev_unlock(hdev);
/* LE Connection Complete event.
 *
 * Finds the pending LE connection (or creates a fresh object for an
 * incoming one), then either finalizes it as connected or reports the
 * failure and deletes it.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt leaves a conn in BT_CONNECT; if none is
	 * found, this is an incoming connection and a new object is
	 * allocated. */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
/* LE Advertising Report event.
 *
 * One skb may carry several variable-length reports packed back to
 * back; each report's RSSI is a single byte appended after its data,
 * hence the sizeof(*ev) + length + 1 stride.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		/* RSSI byte immediately follows the advertising data. */
		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
				  NULL, rssi, 0, 1, ev->data, ev->length);

		/* Advance past header, data and trailing RSSI byte. */
		ptr += sizeof(*ev) + ev->length + 1;
	}
}
/* LE Long Term Key Request event.
 *
 * Looks up the LTK by EDiv/Rand and replies with it, raising the
 * connection security level for authenticated keys.  Short term keys
 * (STKs) are single-use and removed after the reply.  Without a
 * matching key a negative reply is sent.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* STKs protect only the current connection: discard after use. */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3633 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3635 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3637 skb_pull(skb, sizeof(*le_ev));
3639 switch (le_ev->subevent) {
3640 case HCI_EV_LE_CONN_COMPLETE:
3641 hci_le_conn_complete_evt(hdev, skb);
3642 break;
3644 case HCI_EV_LE_ADVERTISING_REPORT:
3645 hci_le_adv_report_evt(hdev, skb);
3646 break;
3648 case HCI_EV_LE_LTK_REQ:
3649 hci_le_ltk_request_evt(hdev, skb);
3650 break;
3652 default:
3653 break;
3657 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3659 struct hci_ev_channel_selected *ev = (void *) skb->data;
3660 struct hci_conn *hcon;
3662 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3664 skb_pull(skb, sizeof(*ev));
3666 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3667 if (!hcon)
3668 return;
3670 amp_read_loc_assoc_final_data(hdev, hcon);
3673 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3675 struct hci_event_hdr *hdr = (void *) skb->data;
3676 __u8 event = hdr->evt;
3678 hci_dev_lock(hdev);
3680 /* Received events are (currently) only needed when a request is
3681 * ongoing so avoid unnecessary memory allocation.
3683 if (hdev->req_status == HCI_REQ_PEND) {
3684 kfree_skb(hdev->recv_evt);
3685 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3688 hci_dev_unlock(hdev);
3690 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3692 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3693 struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
3694 u16 opcode = __le16_to_cpu(hdr->opcode);
3696 hci_req_cmd_complete(hdev, opcode, 0);
3699 switch (event) {
3700 case HCI_EV_INQUIRY_COMPLETE:
3701 hci_inquiry_complete_evt(hdev, skb);
3702 break;
3704 case HCI_EV_INQUIRY_RESULT:
3705 hci_inquiry_result_evt(hdev, skb);
3706 break;
3708 case HCI_EV_CONN_COMPLETE:
3709 hci_conn_complete_evt(hdev, skb);
3710 break;
3712 case HCI_EV_CONN_REQUEST:
3713 hci_conn_request_evt(hdev, skb);
3714 break;
3716 case HCI_EV_DISCONN_COMPLETE:
3717 hci_disconn_complete_evt(hdev, skb);
3718 break;
3720 case HCI_EV_AUTH_COMPLETE:
3721 hci_auth_complete_evt(hdev, skb);
3722 break;
3724 case HCI_EV_REMOTE_NAME:
3725 hci_remote_name_evt(hdev, skb);
3726 break;
3728 case HCI_EV_ENCRYPT_CHANGE:
3729 hci_encrypt_change_evt(hdev, skb);
3730 break;
3732 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3733 hci_change_link_key_complete_evt(hdev, skb);
3734 break;
3736 case HCI_EV_REMOTE_FEATURES:
3737 hci_remote_features_evt(hdev, skb);
3738 break;
3740 case HCI_EV_CMD_COMPLETE:
3741 hci_cmd_complete_evt(hdev, skb);
3742 break;
3744 case HCI_EV_CMD_STATUS:
3745 hci_cmd_status_evt(hdev, skb);
3746 break;
3748 case HCI_EV_ROLE_CHANGE:
3749 hci_role_change_evt(hdev, skb);
3750 break;
3752 case HCI_EV_NUM_COMP_PKTS:
3753 hci_num_comp_pkts_evt(hdev, skb);
3754 break;
3756 case HCI_EV_MODE_CHANGE:
3757 hci_mode_change_evt(hdev, skb);
3758 break;
3760 case HCI_EV_PIN_CODE_REQ:
3761 hci_pin_code_request_evt(hdev, skb);
3762 break;
3764 case HCI_EV_LINK_KEY_REQ:
3765 hci_link_key_request_evt(hdev, skb);
3766 break;
3768 case HCI_EV_LINK_KEY_NOTIFY:
3769 hci_link_key_notify_evt(hdev, skb);
3770 break;
3772 case HCI_EV_CLOCK_OFFSET:
3773 hci_clock_offset_evt(hdev, skb);
3774 break;
3776 case HCI_EV_PKT_TYPE_CHANGE:
3777 hci_pkt_type_change_evt(hdev, skb);
3778 break;
3780 case HCI_EV_PSCAN_REP_MODE:
3781 hci_pscan_rep_mode_evt(hdev, skb);
3782 break;
3784 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3785 hci_inquiry_result_with_rssi_evt(hdev, skb);
3786 break;
3788 case HCI_EV_REMOTE_EXT_FEATURES:
3789 hci_remote_ext_features_evt(hdev, skb);
3790 break;
3792 case HCI_EV_SYNC_CONN_COMPLETE:
3793 hci_sync_conn_complete_evt(hdev, skb);
3794 break;
3796 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3797 hci_extended_inquiry_result_evt(hdev, skb);
3798 break;
3800 case HCI_EV_KEY_REFRESH_COMPLETE:
3801 hci_key_refresh_complete_evt(hdev, skb);
3802 break;
3804 case HCI_EV_IO_CAPA_REQUEST:
3805 hci_io_capa_request_evt(hdev, skb);
3806 break;
3808 case HCI_EV_IO_CAPA_REPLY:
3809 hci_io_capa_reply_evt(hdev, skb);
3810 break;
3812 case HCI_EV_USER_CONFIRM_REQUEST:
3813 hci_user_confirm_request_evt(hdev, skb);
3814 break;
3816 case HCI_EV_USER_PASSKEY_REQUEST:
3817 hci_user_passkey_request_evt(hdev, skb);
3818 break;
3820 case HCI_EV_USER_PASSKEY_NOTIFY:
3821 hci_user_passkey_notify_evt(hdev, skb);
3822 break;
3824 case HCI_EV_KEYPRESS_NOTIFY:
3825 hci_keypress_notify_evt(hdev, skb);
3826 break;
3828 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3829 hci_simple_pair_complete_evt(hdev, skb);
3830 break;
3832 case HCI_EV_REMOTE_HOST_FEATURES:
3833 hci_remote_host_features_evt(hdev, skb);
3834 break;
3836 case HCI_EV_LE_META:
3837 hci_le_meta_evt(hdev, skb);
3838 break;
3840 case HCI_EV_CHANNEL_SELECTED:
3841 hci_chan_selected_evt(hdev, skb);
3842 break;
3844 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3845 hci_remote_oob_data_request_evt(hdev, skb);
3846 break;
3848 case HCI_EV_PHY_LINK_COMPLETE:
3849 hci_phy_link_complete_evt(hdev, skb);
3850 break;
3852 case HCI_EV_LOGICAL_LINK_COMPLETE:
3853 hci_loglink_complete_evt(hdev, skb);
3854 break;
3856 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3857 hci_disconn_loglink_complete_evt(hdev, skb);
3858 break;
3860 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3861 hci_disconn_phylink_complete_evt(hdev, skb);
3862 break;
3864 case HCI_EV_NUM_COMP_BLOCKS:
3865 hci_num_comp_blocks_evt(hdev, skb);
3866 break;
3868 default:
3869 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3870 break;
3873 kfree_skb(skb);
3874 hdev->stat.evt_rx++;