/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

/* Handle HCI Event packets */
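/* Each hci_cc_* function below handles the Command Complete event for the
 * HCI opcode named in its suffix; hci_cmd_complete_evt() near the end of
 * this file dispatches to them based on the completed opcode.
 */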
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
			     BIT(HCI_PERIODIC_INQ));

	hdev->discovery.state = DISCOVERY_STOPPED;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
291 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
293 __u8 param, status = *((__u8 *) skb->data);
294 int old_pscan, old_iscan;
295 void *sent;
297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
299 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
300 if (!sent)
301 return;
303 param = *((__u8 *) sent);
305 hci_dev_lock(hdev);
307 if (status) {
308 mgmt_write_scan_failed(hdev, param, status);
309 hdev->discov_timeout = 0;
310 goto done;
313 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
314 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
316 if (param & SCAN_INQUIRY) {
317 set_bit(HCI_ISCAN, &hdev->flags);
318 if (!old_iscan)
319 mgmt_discoverable(hdev, 1);
320 if (hdev->discov_timeout > 0) {
321 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
322 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
323 to);
325 } else if (old_iscan)
326 mgmt_discoverable(hdev, 0);
328 if (param & SCAN_PAGE) {
329 set_bit(HCI_PSCAN, &hdev->flags);
330 if (!old_pscan)
331 mgmt_connectable(hdev, 1);
332 } else if (old_pscan)
333 mgmt_connectable(hdev, 0);
335 done:
336 hci_dev_unlock(hdev);
337 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
340 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
342 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
344 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
346 if (rp->status)
347 return;
349 memcpy(hdev->dev_class, rp->dev_class, 3);
351 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
352 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
355 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
357 __u8 status = *((__u8 *) skb->data);
358 void *sent;
360 BT_DBG("%s status 0x%2.2x", hdev->name, status);
362 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 if (!sent)
364 return;
366 hci_dev_lock(hdev);
368 if (status == 0)
369 memcpy(hdev->dev_class, sent, 3);
371 if (test_bit(HCI_MGMT, &hdev->dev_flags))
372 mgmt_set_class_of_dev_complete(hdev, sent, status);
374 hci_dev_unlock(hdev);
377 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
379 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
380 __u16 setting;
382 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
384 if (rp->status)
385 return;
387 setting = __le16_to_cpu(rp->voice_setting);
389 if (hdev->voice_setting == setting)
390 return;
392 hdev->voice_setting = setting;
394 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
396 if (hdev->notify)
397 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
400 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
401 struct sk_buff *skb)
403 __u8 status = *((__u8 *) skb->data);
404 __u16 setting;
405 void *sent;
407 BT_DBG("%s status 0x%2.2x", hdev->name, status);
409 if (status)
410 return;
412 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
413 if (!sent)
414 return;
416 setting = get_unaligned_le16(sent);
418 if (hdev->voice_setting == setting)
419 return;
421 hdev->voice_setting = setting;
423 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
425 if (hdev->notify)
426 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
429 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
431 __u8 status = *((__u8 *) skb->data);
433 BT_DBG("%s status 0x%2.2x", hdev->name, status);
435 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
441 void *sent;
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
446 if (!sent)
447 return;
449 if (test_bit(HCI_MGMT, &hdev->dev_flags))
450 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
451 else if (!status) {
452 if (*((u8 *) sent))
453 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
454 else
455 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
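/* Choose the best inquiry result mode the controller supports: 2 for
 * extended inquiry results, 1 for inquiry results with RSSI (including a
 * few controller revisions that support it without advertising the LMP
 * feature bit), and 0 for standard inquiry results.
 */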
459 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
461 if (hdev->features[6] & LMP_EXT_INQ)
462 return 2;
464 if (hdev->features[3] & LMP_RSSI_INQ)
465 return 1;
467 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
468 hdev->lmp_subver == 0x0757)
469 return 1;
471 if (hdev->manufacturer == 15) {
472 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
473 return 1;
474 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
475 return 1;
476 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
477 return 1;
480 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
481 hdev->lmp_subver == 0x1805)
482 return 1;
484 return 0;
487 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
489 u8 mode;
491 mode = hci_get_inquiry_mode(hdev);
493 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
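/* Build the Set Event Mask parameter, enabling only those events that the
 * local features indicate the controller can actually generate.
 */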
496 static void hci_setup_event_mask(struct hci_dev *hdev)
498 /* The second byte is 0xff instead of 0x9f (two reserved bits
499 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
500 * command otherwise */
501 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
503 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
504 * any event mask for pre 1.2 devices */
505 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
506 return;
508 events[4] |= 0x01; /* Flow Specification Complete */
509 events[4] |= 0x02; /* Inquiry Result with RSSI */
510 events[4] |= 0x04; /* Read Remote Extended Features Complete */
511 events[5] |= 0x08; /* Synchronous Connection Complete */
512 events[5] |= 0x10; /* Synchronous Connection Changed */
514 if (hdev->features[3] & LMP_RSSI_INQ)
515 events[4] |= 0x02; /* Inquiry Result with RSSI */
517 if (lmp_sniffsubr_capable(hdev))
518 events[5] |= 0x20; /* Sniff Subrating */
520 if (hdev->features[5] & LMP_PAUSE_ENC)
521 events[5] |= 0x80; /* Encryption Key Refresh Complete */
523 if (hdev->features[6] & LMP_EXT_INQ)
524 events[5] |= 0x40; /* Extended Inquiry Result */
526 if (lmp_no_flush_capable(hdev))
527 events[7] |= 0x01; /* Enhanced Flush Complete */
529 if (hdev->features[7] & LMP_LSTO)
530 events[6] |= 0x80; /* Link Supervision Timeout Changed */
532 if (lmp_ssp_capable(hdev)) {
533 events[6] |= 0x01; /* IO Capability Request */
534 events[6] |= 0x02; /* IO Capability Response */
535 events[6] |= 0x04; /* User Confirmation Request */
536 events[6] |= 0x08; /* User Passkey Request */
537 events[6] |= 0x10; /* Remote OOB Data Request */
538 events[6] |= 0x20; /* Simple Pairing Complete */
539 events[7] |= 0x04; /* User Passkey Notification */
540 events[7] |= 0x08; /* Keypress Notification */
541 events[7] |= 0x10; /* Remote Host Supported
542 * Features Notification */
545 if (lmp_le_capable(hdev))
546 events[7] |= 0x20; /* LE Meta-Event */
548 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
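/* Issue the remaining BR/EDR init-time configuration commands once the
 * local version information is known (called from
 * hci_cc_read_local_version() while HCI_INIT is set).
 */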
551 static void hci_setup(struct hci_dev *hdev)
553 if (hdev->dev_type != HCI_BREDR)
554 return;
556 hci_setup_event_mask(hdev);
558 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
559 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
561 if (lmp_ssp_capable(hdev)) {
562 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
563 u8 mode = 0x01;
564 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
565 sizeof(mode), &mode);
566 } else {
567 struct hci_cp_write_eir cp;
569 memset(hdev->eir, 0, sizeof(hdev->eir));
570 memset(&cp, 0, sizeof(cp));
572 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
576 if (hdev->features[3] & LMP_RSSI_INQ)
577 hci_setup_inquiry_mode(hdev);
579 if (hdev->features[7] & LMP_INQ_TX_PWR)
580 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
582 if (hdev->features[7] & LMP_EXTFEATURES) {
583 struct hci_cp_read_local_ext_features cp;
585 cp.page = 0x01;
586 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
587 &cp);
590 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
591 u8 enable = 1;
592 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
593 &enable);
597 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
599 struct hci_rp_read_local_version *rp = (void *) skb->data;
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603 if (rp->status)
604 goto done;
606 hdev->hci_ver = rp->hci_ver;
607 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
608 hdev->lmp_ver = rp->lmp_ver;
609 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
610 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
612 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
613 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
615 if (test_bit(HCI_INIT, &hdev->flags))
616 hci_setup(hdev);
618 done:
619 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
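/* Program the default link policy based on the role switch, hold, sniff
 * and park features reported by the controller.
 */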
622 static void hci_setup_link_policy(struct hci_dev *hdev)
624 struct hci_cp_write_def_link_policy cp;
625 u16 link_policy = 0;
627 if (lmp_rswitch_capable(hdev))
628 link_policy |= HCI_LP_RSWITCH;
629 if (hdev->features[0] & LMP_HOLD)
630 link_policy |= HCI_LP_HOLD;
631 if (lmp_sniff_capable(hdev))
632 link_policy |= HCI_LP_SNIFF;
633 if (hdev->features[1] & LMP_PARK)
634 link_policy |= HCI_LP_PARK;
636 cp.policy = cpu_to_le16(link_policy);
637 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
640 static void hci_cc_read_local_commands(struct hci_dev *hdev,
641 struct sk_buff *skb)
643 struct hci_rp_read_local_commands *rp = (void *) skb->data;
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
647 if (rp->status)
648 goto done;
650 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
652 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
653 hci_setup_link_policy(hdev);
655 done:
656 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
659 static void hci_cc_read_local_features(struct hci_dev *hdev,
660 struct sk_buff *skb)
662 struct hci_rp_read_local_features *rp = (void *) skb->data;
664 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
666 if (rp->status)
667 return;
669 memcpy(hdev->features, rp->features, 8);
671 /* Adjust default settings according to features
672 * supported by device. */
674 if (hdev->features[0] & LMP_3SLOT)
675 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
677 if (hdev->features[0] & LMP_5SLOT)
678 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
680 if (hdev->features[1] & LMP_HV2) {
681 hdev->pkt_type |= (HCI_HV2);
682 hdev->esco_type |= (ESCO_HV2);
685 if (hdev->features[1] & LMP_HV3) {
686 hdev->pkt_type |= (HCI_HV3);
687 hdev->esco_type |= (ESCO_HV3);
690 if (lmp_esco_capable(hdev))
691 hdev->esco_type |= (ESCO_EV3);
693 if (hdev->features[4] & LMP_EV4)
694 hdev->esco_type |= (ESCO_EV4);
696 if (hdev->features[4] & LMP_EV5)
697 hdev->esco_type |= (ESCO_EV5);
699 if (hdev->features[5] & LMP_EDR_ESCO_2M)
700 hdev->esco_type |= (ESCO_2EV3);
702 if (hdev->features[5] & LMP_EDR_ESCO_3M)
703 hdev->esco_type |= (ESCO_3EV3);
705 if (hdev->features[5] & LMP_EDR_3S_ESCO)
706 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
708 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
709 hdev->features[0], hdev->features[1],
710 hdev->features[2], hdev->features[3],
711 hdev->features[4], hdev->features[5],
712 hdev->features[6], hdev->features[7]);
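/* Write the LE Host Supported setting if the desired value differs from
 * what the controller currently reports in its host features.
 */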
715 static void hci_set_le_support(struct hci_dev *hdev)
717 struct hci_cp_write_le_host_supported cp;
719 memset(&cp, 0, sizeof(cp));
721 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
722 cp.le = 1;
723 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
726 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
727 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
728 &cp);
731 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
732 struct sk_buff *skb)
734 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
736 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
738 if (rp->status)
739 goto done;
741 switch (rp->page) {
742 case 0:
743 memcpy(hdev->features, rp->features, 8);
744 break;
745 case 1:
746 memcpy(hdev->host_features, rp->features, 8);
747 break;
750 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
751 hci_set_le_support(hdev);
753 done:
754 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
757 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
758 struct sk_buff *skb)
760 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
762 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
764 if (rp->status)
765 return;
767 hdev->flow_ctl_mode = rp->mode;
769 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
772 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
774 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
776 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
778 if (rp->status)
779 return;
781 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
782 hdev->sco_mtu = rp->sco_mtu;
783 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
784 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
786 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
787 hdev->sco_mtu = 64;
788 hdev->sco_pkts = 8;
791 hdev->acl_cnt = hdev->acl_pkts;
792 hdev->sco_cnt = hdev->sco_pkts;
794 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
795 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
798 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
800 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
802 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
804 if (!rp->status)
805 bacpy(&hdev->bdaddr, &rp->bdaddr);
807 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
810 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
811 struct sk_buff *skb)
813 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
815 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
817 if (rp->status)
818 return;
820 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
821 hdev->block_len = __le16_to_cpu(rp->block_len);
822 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
824 hdev->block_cnt = hdev->num_blocks;
826 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
827 hdev->block_cnt, hdev->block_len);
829 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
832 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
834 __u8 status = *((__u8 *) skb->data);
836 BT_DBG("%s status 0x%2.2x", hdev->name, status);
838 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
841 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
842 struct sk_buff *skb)
844 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848 if (rp->status)
849 return;
851 hdev->amp_status = rp->amp_status;
852 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
853 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
854 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
855 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
856 hdev->amp_type = rp->amp_type;
857 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
858 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
859 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
860 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
862 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
865 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
866 struct sk_buff *skb)
868 __u8 status = *((__u8 *) skb->data);
870 BT_DBG("%s status 0x%2.2x", hdev->name, status);
872 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
875 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
877 __u8 status = *((__u8 *) skb->data);
879 BT_DBG("%s status 0x%2.2x", hdev->name, status);
881 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
884 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
885 struct sk_buff *skb)
887 __u8 status = *((__u8 *) skb->data);
889 BT_DBG("%s status 0x%2.2x", hdev->name, status);
891 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
894 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
895 struct sk_buff *skb)
897 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
901 if (!rp->status)
902 hdev->inq_tx_power = rp->tx_power;
904 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
907 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
909 __u8 status = *((__u8 *) skb->data);
911 BT_DBG("%s status 0x%2.2x", hdev->name, status);
913 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
916 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
918 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
919 struct hci_cp_pin_code_reply *cp;
920 struct hci_conn *conn;
922 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
924 hci_dev_lock(hdev);
926 if (test_bit(HCI_MGMT, &hdev->dev_flags))
927 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
929 if (rp->status)
930 goto unlock;
932 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
933 if (!cp)
934 goto unlock;
936 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
937 if (conn)
938 conn->pin_length = cp->pin_len;
940 unlock:
941 hci_dev_unlock(hdev);
944 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
946 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
948 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
950 hci_dev_lock(hdev);
952 if (test_bit(HCI_MGMT, &hdev->dev_flags))
953 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
954 rp->status);
956 hci_dev_unlock(hdev);
959 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
960 struct sk_buff *skb)
962 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
966 if (rp->status)
967 return;
969 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
970 hdev->le_pkts = rp->le_max_pkt;
972 hdev->le_cnt = hdev->le_pkts;
974 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
976 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
979 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
981 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
983 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 hci_dev_lock(hdev);
987 if (test_bit(HCI_MGMT, &hdev->dev_flags))
988 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
989 rp->status);
991 hci_dev_unlock(hdev);
994 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
995 struct sk_buff *skb)
997 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 hci_dev_lock(hdev);
1003 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1004 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1005 ACL_LINK, 0, rp->status);
1007 hci_dev_unlock(hdev);
1010 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1012 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016 hci_dev_lock(hdev);
1018 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1019 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1020 0, rp->status);
1022 hci_dev_unlock(hdev);
1025 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1026 struct sk_buff *skb)
1028 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1030 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1032 hci_dev_lock(hdev);
1034 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1035 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1036 ACL_LINK, 0, rp->status);
1038 hci_dev_unlock(hdev);
1041 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1042 struct sk_buff *skb)
1044 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1046 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1048 hci_dev_lock(hdev);
1049 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1050 rp->randomizer, rp->status);
1051 hci_dev_unlock(hdev);
1054 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1056 __u8 status = *((__u8 *) skb->data);
1058 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1060 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1062 if (status) {
1063 hci_dev_lock(hdev);
1064 mgmt_start_discovery_failed(hdev, status);
1065 hci_dev_unlock(hdev);
1066 return;
1070 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1071 struct sk_buff *skb)
1073 struct hci_cp_le_set_scan_enable *cp;
1074 __u8 status = *((__u8 *) skb->data);
1076 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1079 if (!cp)
1080 return;
1082 switch (cp->enable) {
1083 case LE_SCANNING_ENABLED:
1084 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1086 if (status) {
1087 hci_dev_lock(hdev);
1088 mgmt_start_discovery_failed(hdev, status);
1089 hci_dev_unlock(hdev);
1090 return;
1093 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1095 hci_dev_lock(hdev);
1096 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1097 hci_dev_unlock(hdev);
1098 break;
1100 case LE_SCANNING_DISABLED:
1101 if (status) {
1102 hci_dev_lock(hdev);
1103 mgmt_stop_discovery_failed(hdev, status);
1104 hci_dev_unlock(hdev);
1105 return;
1108 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1110 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1111 hdev->discovery.state == DISCOVERY_FINDING) {
1112 mgmt_interleaved_discovery(hdev);
1113 } else {
1114 hci_dev_lock(hdev);
1115 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1116 hci_dev_unlock(hdev);
1119 break;
1121 default:
1122 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1123 break;
1127 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1129 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1131 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1133 if (rp->status)
1134 return;
1136 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1139 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1141 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1145 if (rp->status)
1146 return;
1148 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1151 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1152 struct sk_buff *skb)
1154 struct hci_cp_write_le_host_supported *sent;
1155 __u8 status = *((__u8 *) skb->data);
1157 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1159 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1160 if (!sent)
1161 return;
1163 if (!status) {
1164 if (sent->le)
1165 hdev->host_features[0] |= LMP_HOST_LE;
1166 else
1167 hdev->host_features[0] &= ~LMP_HOST_LE;
1170 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1171 !test_bit(HCI_INIT, &hdev->flags))
1172 mgmt_le_enable_complete(hdev, sent->le, status);
1174 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
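/* The hci_cs_* functions below handle Command Status events for commands
 * whose results are delivered later through dedicated events; a non-zero
 * status here means the command failed before it started.
 */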
1177 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1179 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1181 if (status) {
1182 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1183 hci_conn_check_pending(hdev);
1184 hci_dev_lock(hdev);
1185 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1186 mgmt_start_discovery_failed(hdev, status);
1187 hci_dev_unlock(hdev);
1188 return;
1191 set_bit(HCI_INQUIRY, &hdev->flags);
1193 hci_dev_lock(hdev);
1194 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1195 hci_dev_unlock(hdev);
1198 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1200 struct hci_cp_create_conn *cp;
1201 struct hci_conn *conn;
1203 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1205 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1206 if (!cp)
1207 return;
1209 hci_dev_lock(hdev);
1211 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1213 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1215 if (status) {
1216 if (conn && conn->state == BT_CONNECT) {
1217 if (status != 0x0c || conn->attempt > 2) {
1218 conn->state = BT_CLOSED;
1219 hci_proto_connect_cfm(conn, status);
1220 hci_conn_del(conn);
1221 } else
1222 conn->state = BT_CONNECT2;
1224 } else {
1225 if (!conn) {
1226 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1227 if (conn) {
1228 conn->out = true;
1229 conn->link_mode |= HCI_LM_MASTER;
1230 } else
1231 BT_ERR("No memory for new connection");
1235 hci_dev_unlock(hdev);
1238 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1240 struct hci_cp_add_sco *cp;
1241 struct hci_conn *acl, *sco;
1242 __u16 handle;
1244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1246 if (!status)
1247 return;
1249 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1250 if (!cp)
1251 return;
1253 handle = __le16_to_cpu(cp->handle);
1255 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1257 hci_dev_lock(hdev);
1259 acl = hci_conn_hash_lookup_handle(hdev, handle);
1260 if (acl) {
1261 sco = acl->link;
1262 if (sco) {
1263 sco->state = BT_CLOSED;
1265 hci_proto_connect_cfm(sco, status);
1266 hci_conn_del(sco);
1270 hci_dev_unlock(hdev);
1273 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1275 struct hci_cp_auth_requested *cp;
1276 struct hci_conn *conn;
1278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1280 if (!status)
1281 return;
1283 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1284 if (!cp)
1285 return;
1287 hci_dev_lock(hdev);
1289 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1290 if (conn) {
1291 if (conn->state == BT_CONFIG) {
1292 hci_proto_connect_cfm(conn, status);
1293 hci_conn_put(conn);
1297 hci_dev_unlock(hdev);
1300 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1302 struct hci_cp_set_conn_encrypt *cp;
1303 struct hci_conn *conn;
1305 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1307 if (!status)
1308 return;
1310 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1311 if (!cp)
1312 return;
1314 hci_dev_lock(hdev);
1316 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1317 if (conn) {
1318 if (conn->state == BT_CONFIG) {
1319 hci_proto_connect_cfm(conn, status);
1320 hci_conn_put(conn);
1324 hci_dev_unlock(hdev);
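/* Decide whether an outgoing connection in the BT_CONFIG state still needs
 * authentication before it can be reported as connected.
 */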
1327 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1328 struct hci_conn *conn)
1330 if (conn->state != BT_CONFIG || !conn->out)
1331 return 0;
1333 if (conn->pending_sec_level == BT_SECURITY_SDP)
1334 return 0;
1336 /* Only request authentication for SSP connections or non-SSP
1337 * devices with sec_level HIGH or if MITM protection is requested */
1338 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1339 conn->pending_sec_level != BT_SECURITY_HIGH)
1340 return 0;
1342 return 1;
1345 static int hci_resolve_name(struct hci_dev *hdev,
1346 struct inquiry_entry *e)
1348 struct hci_cp_remote_name_req cp;
1350 memset(&cp, 0, sizeof(cp));
1352 bacpy(&cp.bdaddr, &e->data.bdaddr);
1353 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1354 cp.pscan_mode = e->data.pscan_mode;
1355 cp.clock_offset = e->data.clock_offset;
1357 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
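/* Pick the next inquiry cache entry that still needs its name resolved and
 * send a Remote Name Request for it; returns true if a request was sent.
 */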
1360 static bool hci_resolve_next_name(struct hci_dev *hdev)
1362 struct discovery_state *discov = &hdev->discovery;
1363 struct inquiry_entry *e;
1365 if (list_empty(&discov->resolve))
1366 return false;
1368 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1369 if (!e)
1370 return false;
1372 if (hci_resolve_name(hdev, e) == 0) {
1373 e->name_state = NAME_PENDING;
1374 return true;
1377 return false;
1380 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1381 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1383 struct discovery_state *discov = &hdev->discovery;
1384 struct inquiry_entry *e;
1386 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1387 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1388 name_len, conn->dev_class);
1390 if (discov->state == DISCOVERY_STOPPED)
1391 return;
1393 if (discov->state == DISCOVERY_STOPPING)
1394 goto discov_complete;
1396 if (discov->state != DISCOVERY_RESOLVING)
1397 return;
1399 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names are
	 * pending resolution, there is no need to continue resolving the next
	 * name; that will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
1404 if (!e)
1405 return;
1407 list_del(&e->list);
1408 if (name) {
1409 e->name_state = NAME_KNOWN;
1410 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1411 e->data.rssi, name, name_len);
1412 } else {
1413 e->name_state = NAME_NOT_KNOWN;
1416 if (hci_resolve_next_name(hdev))
1417 return;
1419 discov_complete:
1420 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1423 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1425 struct hci_cp_remote_name_req *cp;
1426 struct hci_conn *conn;
1428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1430 /* If successful wait for the name req complete event before
1431 * checking for the need to do authentication */
1432 if (!status)
1433 return;
1435 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1436 if (!cp)
1437 return;
1439 hci_dev_lock(hdev);
1441 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1443 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1444 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1446 if (!conn)
1447 goto unlock;
1449 if (!hci_outgoing_auth_needed(hdev, conn))
1450 goto unlock;
1452 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1453 struct hci_cp_auth_requested cp;
1454 cp.handle = __cpu_to_le16(conn->handle);
1455 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1458 unlock:
1459 hci_dev_unlock(hdev);
1462 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1464 struct hci_cp_read_remote_features *cp;
1465 struct hci_conn *conn;
1467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1469 if (!status)
1470 return;
1472 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1473 if (!cp)
1474 return;
1476 hci_dev_lock(hdev);
1478 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1479 if (conn) {
1480 if (conn->state == BT_CONFIG) {
1481 hci_proto_connect_cfm(conn, status);
1482 hci_conn_put(conn);
1486 hci_dev_unlock(hdev);
1489 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1491 struct hci_cp_read_remote_ext_features *cp;
1492 struct hci_conn *conn;
1494 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1496 if (!status)
1497 return;
1499 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1500 if (!cp)
1501 return;
1503 hci_dev_lock(hdev);
1505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1506 if (conn) {
1507 if (conn->state == BT_CONFIG) {
1508 hci_proto_connect_cfm(conn, status);
1509 hci_conn_put(conn);
1513 hci_dev_unlock(hdev);
1516 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1518 struct hci_cp_setup_sync_conn *cp;
1519 struct hci_conn *acl, *sco;
1520 __u16 handle;
1522 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1524 if (!status)
1525 return;
1527 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1528 if (!cp)
1529 return;
1531 handle = __le16_to_cpu(cp->handle);
1533 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1535 hci_dev_lock(hdev);
1537 acl = hci_conn_hash_lookup_handle(hdev, handle);
1538 if (acl) {
1539 sco = acl->link;
1540 if (sco) {
1541 sco->state = BT_CLOSED;
1543 hci_proto_connect_cfm(sco, status);
1544 hci_conn_del(sco);
1548 hci_dev_unlock(hdev);
1551 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1553 struct hci_cp_sniff_mode *cp;
1554 struct hci_conn *conn;
1556 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1558 if (!status)
1559 return;
1561 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1562 if (!cp)
1563 return;
1565 hci_dev_lock(hdev);
1567 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1568 if (conn) {
1569 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1571 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1572 hci_sco_setup(conn, status);
1575 hci_dev_unlock(hdev);
1578 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1580 struct hci_cp_exit_sniff_mode *cp;
1581 struct hci_conn *conn;
1583 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1585 if (!status)
1586 return;
1588 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1589 if (!cp)
1590 return;
1592 hci_dev_lock(hdev);
1594 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1595 if (conn) {
1596 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1598 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1599 hci_sco_setup(conn, status);
1602 hci_dev_unlock(hdev);
1605 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1607 struct hci_cp_disconnect *cp;
1608 struct hci_conn *conn;
1610 if (!status)
1611 return;
1613 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1614 if (!cp)
1615 return;
1617 hci_dev_lock(hdev);
1619 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1620 if (conn)
1621 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1622 conn->dst_type, status);
1624 hci_dev_unlock(hdev);
1627 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1629 struct hci_conn *conn;
1631 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1633 if (status) {
1634 hci_dev_lock(hdev);
1636 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1637 if (!conn) {
1638 hci_dev_unlock(hdev);
1639 return;
1642 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
1643 conn);
1645 conn->state = BT_CLOSED;
1646 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1647 conn->dst_type, status);
1648 hci_proto_connect_cfm(conn, status);
1649 hci_conn_del(conn);
1651 hci_dev_unlock(hdev);
1655 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
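/* Handlers for asynchronous HCI events start here, beginning with the
 * Inquiry Complete event.
 */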
1660 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1662 __u8 status = *((__u8 *) skb->data);
1663 struct discovery_state *discov = &hdev->discovery;
1664 struct inquiry_entry *e;
1666 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1668 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1670 hci_conn_check_pending(hdev);
1672 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1673 return;
1675 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1676 return;
1678 hci_dev_lock(hdev);
1680 if (discov->state != DISCOVERY_FINDING)
1681 goto unlock;
1683 if (list_empty(&discov->resolve)) {
1684 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1685 goto unlock;
1688 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1689 if (e && hci_resolve_name(hdev, e) == 0) {
1690 e->name_state = NAME_PENDING;
1691 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1692 } else {
1693 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1696 unlock:
1697 hci_dev_unlock(hdev);
1700 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1702 struct inquiry_data data;
1703 struct inquiry_info *info = (void *) (skb->data + 1);
1704 int num_rsp = *((__u8 *) skb->data);
1706 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1708 if (!num_rsp)
1709 return;
1711 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1712 return;
1714 hci_dev_lock(hdev);
1716 for (; num_rsp; num_rsp--, info++) {
1717 bool name_known, ssp;
1719 bacpy(&data.bdaddr, &info->bdaddr);
1720 data.pscan_rep_mode = info->pscan_rep_mode;
1721 data.pscan_period_mode = info->pscan_period_mode;
1722 data.pscan_mode = info->pscan_mode;
1723 memcpy(data.dev_class, info->dev_class, 3);
1724 data.clock_offset = info->clock_offset;
1725 data.rssi = 0x00;
1726 data.ssp_mode = 0x00;
1728 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL, 0);
	}

	hci_dev_unlock(hdev);
1737 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1739 struct hci_ev_conn_complete *ev = (void *) skb->data;
1740 struct hci_conn *conn;
1742 BT_DBG("%s", hdev->name);
1744 hci_dev_lock(hdev);
1746 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1747 if (!conn) {
1748 if (ev->link_type != SCO_LINK)
1749 goto unlock;
1751 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1752 if (!conn)
1753 goto unlock;
1755 conn->type = SCO_LINK;
1758 if (!ev->status) {
1759 conn->handle = __le16_to_cpu(ev->handle);
1761 if (conn->type == ACL_LINK) {
1762 conn->state = BT_CONFIG;
1763 hci_conn_hold(conn);
1765 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1766 !hci_find_link_key(hdev, &ev->bdaddr))
1767 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1768 else
1769 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1770 } else
1771 conn->state = BT_CONNECTED;
1773 hci_conn_hold_device(conn);
1774 hci_conn_add_sysfs(conn);
1776 if (test_bit(HCI_AUTH, &hdev->flags))
1777 conn->link_mode |= HCI_LM_AUTH;
1779 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1780 conn->link_mode |= HCI_LM_ENCRYPT;
1782 /* Get remote features */
1783 if (conn->type == ACL_LINK) {
1784 struct hci_cp_read_remote_features cp;
1785 cp.handle = ev->handle;
1786 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1787 sizeof(cp), &cp);
1790 /* Set packet type for incoming connection */
1791 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1792 struct hci_cp_change_conn_ptype cp;
1793 cp.handle = ev->handle;
1794 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1795 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1796 &cp);
1798 } else {
1799 conn->state = BT_CLOSED;
1800 if (conn->type == ACL_LINK)
1801 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1802 conn->dst_type, ev->status);
1805 if (conn->type == ACL_LINK)
1806 hci_sco_setup(conn, ev->status);
1808 if (ev->status) {
1809 hci_proto_connect_cfm(conn, ev->status);
1810 hci_conn_del(conn);
1811 } else if (ev->link_type != ACL_LINK)
1812 hci_proto_connect_cfm(conn, ev->status);
1814 unlock:
1815 hci_dev_unlock(hdev);
1817 hci_conn_check_pending(hdev);
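/* Handle an incoming Connection Request: accept ACL/SCO requests that pass
 * the link mode and blacklist checks, otherwise reject the connection.
 */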
1820 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1822 struct hci_ev_conn_request *ev = (void *) skb->data;
1823 int mask = hdev->link_mode;
1825 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1826 ev->link_type);
1828 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1830 if ((mask & HCI_LM_ACCEPT) &&
1831 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1832 /* Connection accepted */
1833 struct inquiry_entry *ie;
1834 struct hci_conn *conn;
1836 hci_dev_lock(hdev);
1838 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1839 if (ie)
1840 memcpy(ie->data.dev_class, ev->dev_class, 3);
1842 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1843 &ev->bdaddr);
1844 if (!conn) {
1845 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1846 if (!conn) {
1847 BT_ERR("No memory for new connection");
1848 hci_dev_unlock(hdev);
1849 return;
1853 memcpy(conn->dev_class, ev->dev_class, 3);
1854 conn->state = BT_CONNECT;
1856 hci_dev_unlock(hdev);
1858 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1859 struct hci_cp_accept_conn_req cp;
1861 bacpy(&cp.bdaddr, &ev->bdaddr);
1863 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1864 cp.role = 0x00; /* Become master */
1865 else
1866 cp.role = 0x01; /* Remain slave */
1868 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1869 &cp);
1870 } else {
1871 struct hci_cp_accept_sync_conn_req cp;
1873 bacpy(&cp.bdaddr, &ev->bdaddr);
1874 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1876 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1877 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1878 cp.max_latency = __constant_cpu_to_le16(0xffff);
1879 cp.content_format = cpu_to_le16(hdev->voice_setting);
1880 cp.retrans_effort = 0xff;
1882 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1883 sizeof(cp), &cp);
1885 } else {
1886 /* Connection rejected */
1887 struct hci_cp_reject_conn_req cp;
1889 bacpy(&cp.bdaddr, &ev->bdaddr);
1890 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1891 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
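/* Map an HCI disconnect error code to the corresponding mgmt disconnect
 * reason reported to userspace.
 */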
1895 static u8 hci_to_mgmt_reason(u8 err)
1897 switch (err) {
1898 case HCI_ERROR_CONNECTION_TIMEOUT:
1899 return MGMT_DEV_DISCONN_TIMEOUT;
1900 case HCI_ERROR_REMOTE_USER_TERM:
1901 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1902 case HCI_ERROR_REMOTE_POWER_OFF:
1903 return MGMT_DEV_DISCONN_REMOTE;
1904 case HCI_ERROR_LOCAL_HOST_TERM:
1905 return MGMT_DEV_DISCONN_LOCAL_HOST;
1906 default:
1907 return MGMT_DEV_DISCONN_UNKNOWN;
1911 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1913 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1914 struct hci_conn *conn;
1916 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1918 hci_dev_lock(hdev);
1920 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1921 if (!conn)
1922 goto unlock;
1924 if (ev->status == 0)
1925 conn->state = BT_CLOSED;
1927 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1928 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1929 if (ev->status) {
1930 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1931 conn->dst_type, ev->status);
1932 } else {
1933 u8 reason = hci_to_mgmt_reason(ev->reason);
1935 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1936 conn->dst_type, reason);
1940 if (ev->status == 0) {
1941 if (conn->type == ACL_LINK && conn->flush_key)
1942 hci_remove_link_key(hdev, &conn->dst);
1943 hci_proto_disconn_cfm(conn, ev->reason);
1944 hci_conn_del(conn);
1947 unlock:
1948 hci_dev_unlock(hdev);
1951 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1953 struct hci_ev_auth_complete *ev = (void *) skb->data;
1954 struct hci_conn *conn;
1956 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1958 hci_dev_lock(hdev);
1960 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1961 if (!conn)
1962 goto unlock;
1964 if (!ev->status) {
1965 if (!hci_conn_ssp_enabled(conn) &&
1966 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1967 BT_INFO("re-auth of legacy device is not possible.");
1968 } else {
1969 conn->link_mode |= HCI_LM_AUTH;
1970 conn->sec_level = conn->pending_sec_level;
1972 } else {
1973 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1974 ev->status);
1977 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1978 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1980 if (conn->state == BT_CONFIG) {
1981 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1982 struct hci_cp_set_conn_encrypt cp;
1983 cp.handle = ev->handle;
1984 cp.encrypt = 0x01;
1985 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1986 &cp);
1987 } else {
1988 conn->state = BT_CONNECTED;
1989 hci_proto_connect_cfm(conn, ev->status);
1990 hci_conn_put(conn);
1992 } else {
1993 hci_auth_cfm(conn, ev->status);
1995 hci_conn_hold(conn);
1996 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1997 hci_conn_put(conn);
2000 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2001 if (!ev->status) {
2002 struct hci_cp_set_conn_encrypt cp;
2003 cp.handle = ev->handle;
2004 cp.encrypt = 0x01;
2005 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2006 &cp);
2007 } else {
2008 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2009 hci_encrypt_cfm(conn, ev->status, 0x00);
2013 unlock:
2014 hci_dev_unlock(hdev);
2017 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2019 struct hci_ev_remote_name *ev = (void *) skb->data;
2020 struct hci_conn *conn;
2022 BT_DBG("%s", hdev->name);
2024 hci_conn_check_pending(hdev);
2026 hci_dev_lock(hdev);
2028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2030 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2031 goto check_auth;
2033 if (ev->status == 0)
2034 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2035 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2036 else
2037 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2039 check_auth:
2040 if (!conn)
2041 goto unlock;
2043 if (!hci_outgoing_auth_needed(hdev, conn))
2044 goto unlock;
2046 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2047 struct hci_cp_auth_requested cp;
2048 cp.handle = __cpu_to_le16(conn->handle);
2049 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2052 unlock:
2053 hci_dev_unlock(hdev);
2056 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2058 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2059 struct hci_conn *conn;
2061 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2063 hci_dev_lock(hdev);
2065 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2066 if (conn) {
2067 if (!ev->status) {
2068 if (ev->encrypt) {
2069 /* Encryption implies authentication */
2070 conn->link_mode |= HCI_LM_AUTH;
2071 conn->link_mode |= HCI_LM_ENCRYPT;
2072 conn->sec_level = conn->pending_sec_level;
2073 } else
2074 conn->link_mode &= ~HCI_LM_ENCRYPT;
2077 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2079 if (ev->status && conn->state == BT_CONNECTED) {
2080 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2081 hci_conn_put(conn);
2082 goto unlock;
2085 if (conn->state == BT_CONFIG) {
2086 if (!ev->status)
2087 conn->state = BT_CONNECTED;
2089 hci_proto_connect_cfm(conn, ev->status);
2090 hci_conn_put(conn);
2091 } else
2092 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2095 unlock:
2096 hci_dev_unlock(hdev);
2099 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2102 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2103 struct hci_conn *conn;
2105 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2107 hci_dev_lock(hdev);
2109 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2110 if (conn) {
2111 if (!ev->status)
2112 conn->link_mode |= HCI_LM_SECURE;
2114 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2116 hci_key_change_cfm(conn, ev->status);
2119 hci_dev_unlock(hdev);
2122 static void hci_remote_features_evt(struct hci_dev *hdev,
2123 struct sk_buff *skb)
2125 struct hci_ev_remote_features *ev = (void *) skb->data;
2126 struct hci_conn *conn;
2128 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2130 hci_dev_lock(hdev);
2132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2133 if (!conn)
2134 goto unlock;
2136 if (!ev->status)
2137 memcpy(conn->features, ev->features, 8);
2139 if (conn->state != BT_CONFIG)
2140 goto unlock;
2142 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2143 struct hci_cp_read_remote_ext_features cp;
2144 cp.handle = ev->handle;
2145 cp.page = 0x01;
2146 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2147 sizeof(cp), &cp);
2148 goto unlock;
2151 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2152 struct hci_cp_remote_name_req cp;
2153 memset(&cp, 0, sizeof(cp));
2154 bacpy(&cp.bdaddr, &conn->dst);
2155 cp.pscan_rep_mode = 0x02;
2156 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2157 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2158 mgmt_device_connected(hdev, &conn->dst, conn->type,
2159 conn->dst_type, 0, NULL, 0,
2160 conn->dev_class);
2162 if (!hci_outgoing_auth_needed(hdev, conn)) {
2163 conn->state = BT_CONNECTED;
2164 hci_proto_connect_cfm(conn, ev->status);
2165 hci_conn_put(conn);
2168 unlock:
2169 hci_dev_unlock(hdev);
2172 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2174 BT_DBG("%s", hdev->name);
2177 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2178 struct sk_buff *skb)
2180 BT_DBG("%s", hdev->name);
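/* Dispatch Command Complete events to the hci_cc_* handlers defined above,
 * cancel the command timer and, if the controller reports available command
 * credits, kick the command queue again.
 */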
2183 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2185 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2186 __u16 opcode;
2188 skb_pull(skb, sizeof(*ev));
2190 opcode = __le16_to_cpu(ev->opcode);
2192 switch (opcode) {
2193 case HCI_OP_INQUIRY_CANCEL:
2194 hci_cc_inquiry_cancel(hdev, skb);
2195 break;
2197 case HCI_OP_PERIODIC_INQ:
2198 hci_cc_periodic_inq(hdev, skb);
2199 break;
2201 case HCI_OP_EXIT_PERIODIC_INQ:
2202 hci_cc_exit_periodic_inq(hdev, skb);
2203 break;
2205 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2206 hci_cc_remote_name_req_cancel(hdev, skb);
2207 break;
2209 case HCI_OP_ROLE_DISCOVERY:
2210 hci_cc_role_discovery(hdev, skb);
2211 break;
2213 case HCI_OP_READ_LINK_POLICY:
2214 hci_cc_read_link_policy(hdev, skb);
2215 break;
2217 case HCI_OP_WRITE_LINK_POLICY:
2218 hci_cc_write_link_policy(hdev, skb);
2219 break;
2221 case HCI_OP_READ_DEF_LINK_POLICY:
2222 hci_cc_read_def_link_policy(hdev, skb);
2223 break;
2225 case HCI_OP_WRITE_DEF_LINK_POLICY:
2226 hci_cc_write_def_link_policy(hdev, skb);
2227 break;
2229 case HCI_OP_RESET:
2230 hci_cc_reset(hdev, skb);
2231 break;
2233 case HCI_OP_WRITE_LOCAL_NAME:
2234 hci_cc_write_local_name(hdev, skb);
2235 break;
2237 case HCI_OP_READ_LOCAL_NAME:
2238 hci_cc_read_local_name(hdev, skb);
2239 break;
2241 case HCI_OP_WRITE_AUTH_ENABLE:
2242 hci_cc_write_auth_enable(hdev, skb);
2243 break;
2245 case HCI_OP_WRITE_ENCRYPT_MODE:
2246 hci_cc_write_encrypt_mode(hdev, skb);
2247 break;
2249 case HCI_OP_WRITE_SCAN_ENABLE:
2250 hci_cc_write_scan_enable(hdev, skb);
2251 break;
2253 case HCI_OP_READ_CLASS_OF_DEV:
2254 hci_cc_read_class_of_dev(hdev, skb);
2255 break;
2257 case HCI_OP_WRITE_CLASS_OF_DEV:
2258 hci_cc_write_class_of_dev(hdev, skb);
2259 break;
2261 case HCI_OP_READ_VOICE_SETTING:
2262 hci_cc_read_voice_setting(hdev, skb);
2263 break;
2265 case HCI_OP_WRITE_VOICE_SETTING:
2266 hci_cc_write_voice_setting(hdev, skb);
2267 break;
2269 case HCI_OP_HOST_BUFFER_SIZE:
2270 hci_cc_host_buffer_size(hdev, skb);
2271 break;
2273 case HCI_OP_WRITE_SSP_MODE:
2274 hci_cc_write_ssp_mode(hdev, skb);
2275 break;
2277 case HCI_OP_READ_LOCAL_VERSION:
2278 hci_cc_read_local_version(hdev, skb);
2279 break;
2281 case HCI_OP_READ_LOCAL_COMMANDS:
2282 hci_cc_read_local_commands(hdev, skb);
2283 break;
2285 case HCI_OP_READ_LOCAL_FEATURES:
2286 hci_cc_read_local_features(hdev, skb);
2287 break;
2289 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2290 hci_cc_read_local_ext_features(hdev, skb);
2291 break;
2293 case HCI_OP_READ_BUFFER_SIZE:
2294 hci_cc_read_buffer_size(hdev, skb);
2295 break;
2297 case HCI_OP_READ_BD_ADDR:
2298 hci_cc_read_bd_addr(hdev, skb);
2299 break;
2301 case HCI_OP_READ_DATA_BLOCK_SIZE:
2302 hci_cc_read_data_block_size(hdev, skb);
2303 break;
2305 case HCI_OP_WRITE_CA_TIMEOUT:
2306 hci_cc_write_ca_timeout(hdev, skb);
2307 break;
2309 case HCI_OP_READ_FLOW_CONTROL_MODE:
2310 hci_cc_read_flow_control_mode(hdev, skb);
2311 break;
2313 case HCI_OP_READ_LOCAL_AMP_INFO:
2314 hci_cc_read_local_amp_info(hdev, skb);
2315 break;
2317 case HCI_OP_DELETE_STORED_LINK_KEY:
2318 hci_cc_delete_stored_link_key(hdev, skb);
2319 break;
2321 case HCI_OP_SET_EVENT_MASK:
2322 hci_cc_set_event_mask(hdev, skb);
2323 break;
2325 case HCI_OP_WRITE_INQUIRY_MODE:
2326 hci_cc_write_inquiry_mode(hdev, skb);
2327 break;
2329 case HCI_OP_READ_INQ_RSP_TX_POWER:
2330 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2331 break;
2333 case HCI_OP_SET_EVENT_FLT:
2334 hci_cc_set_event_flt(hdev, skb);
2335 break;
2337 case HCI_OP_PIN_CODE_REPLY:
2338 hci_cc_pin_code_reply(hdev, skb);
2339 break;
2341 case HCI_OP_PIN_CODE_NEG_REPLY:
2342 hci_cc_pin_code_neg_reply(hdev, skb);
2343 break;
2345 case HCI_OP_READ_LOCAL_OOB_DATA:
2346 hci_cc_read_local_oob_data_reply(hdev, skb);
2347 break;
2349 case HCI_OP_LE_READ_BUFFER_SIZE:
2350 hci_cc_le_read_buffer_size(hdev, skb);
2351 break;
2353 case HCI_OP_USER_CONFIRM_REPLY:
2354 hci_cc_user_confirm_reply(hdev, skb);
2355 break;
2357 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2358 hci_cc_user_confirm_neg_reply(hdev, skb);
2359 break;
2361 case HCI_OP_USER_PASSKEY_REPLY:
2362 hci_cc_user_passkey_reply(hdev, skb);
2363 break;
2365 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2366 hci_cc_user_passkey_neg_reply(hdev, skb);
2367 break;
2369 case HCI_OP_LE_SET_SCAN_PARAM:
2370 hci_cc_le_set_scan_param(hdev, skb);
2371 break;
2373 case HCI_OP_LE_SET_SCAN_ENABLE:
2374 hci_cc_le_set_scan_enable(hdev, skb);
2375 break;
2377 case HCI_OP_LE_LTK_REPLY:
2378 hci_cc_le_ltk_reply(hdev, skb);
2379 break;
2381 case HCI_OP_LE_LTK_NEG_REPLY:
2382 hci_cc_le_ltk_neg_reply(hdev, skb);
2383 break;
2385 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2386 hci_cc_write_le_host_supported(hdev, skb);
2387 break;
2389 default:
2390 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2391 break;
2392 }
2394 if (ev->opcode != HCI_OP_NOP)
2395 del_timer(&hdev->cmd_timer);
2397 if (ev->ncmd) {
2398 atomic_set(&hdev->cmd_cnt, 1);
2399 if (!skb_queue_empty(&hdev->cmd_q))
2400 queue_work(hdev->workqueue, &hdev->cmd_work);
2401 }
2402 }
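/* Command Status event: dispatch to the hci_cs_* handler for commands that
 * only report status (no Command Complete), then handle the command timer
 * and queue in the same way as hci_cmd_complete_evt(), except that the
 * queue is not restarted while an HCI reset is in progress. */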
2404 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2406 struct hci_ev_cmd_status *ev = (void *) skb->data;
2407 __u16 opcode;
2409 skb_pull(skb, sizeof(*ev));
2411 opcode = __le16_to_cpu(ev->opcode);
2413 switch (opcode) {
2414 case HCI_OP_INQUIRY:
2415 hci_cs_inquiry(hdev, ev->status);
2416 break;
2418 case HCI_OP_CREATE_CONN:
2419 hci_cs_create_conn(hdev, ev->status);
2420 break;
2422 case HCI_OP_ADD_SCO:
2423 hci_cs_add_sco(hdev, ev->status);
2424 break;
2426 case HCI_OP_AUTH_REQUESTED:
2427 hci_cs_auth_requested(hdev, ev->status);
2428 break;
2430 case HCI_OP_SET_CONN_ENCRYPT:
2431 hci_cs_set_conn_encrypt(hdev, ev->status);
2432 break;
2434 case HCI_OP_REMOTE_NAME_REQ:
2435 hci_cs_remote_name_req(hdev, ev->status);
2436 break;
2438 case HCI_OP_READ_REMOTE_FEATURES:
2439 hci_cs_read_remote_features(hdev, ev->status);
2440 break;
2442 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2443 hci_cs_read_remote_ext_features(hdev, ev->status);
2444 break;
2446 case HCI_OP_SETUP_SYNC_CONN:
2447 hci_cs_setup_sync_conn(hdev, ev->status);
2448 break;
2450 case HCI_OP_SNIFF_MODE:
2451 hci_cs_sniff_mode(hdev, ev->status);
2452 break;
2454 case HCI_OP_EXIT_SNIFF_MODE:
2455 hci_cs_exit_sniff_mode(hdev, ev->status);
2456 break;
2458 case HCI_OP_DISCONNECT:
2459 hci_cs_disconnect(hdev, ev->status);
2460 break;
2462 case HCI_OP_LE_CREATE_CONN:
2463 hci_cs_le_create_conn(hdev, ev->status);
2464 break;
2466 case HCI_OP_LE_START_ENC:
2467 hci_cs_le_start_enc(hdev, ev->status);
2468 break;
2470 default:
2471 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2472 break;
2473 }
2475 if (ev->opcode != HCI_OP_NOP)
2476 del_timer(&hdev->cmd_timer);
2478 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2479 atomic_set(&hdev->cmd_cnt, 1);
2480 if (!skb_queue_empty(&hdev->cmd_q))
2481 queue_work(hdev->workqueue, &hdev->cmd_work);
2482 }
2483 }
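/* Role Change event: update the connection's master/slave link mode on
 * success, clear the pending role-switch flag and notify any waiters via
 * hci_role_switch_cfm(). */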
2485 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2487 struct hci_ev_role_change *ev = (void *) skb->data;
2488 struct hci_conn *conn;
2490 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2492 hci_dev_lock(hdev);
2494 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2495 if (conn) {
2496 if (!ev->status) {
2497 if (ev->role)
2498 conn->link_mode &= ~HCI_LM_MASTER;
2499 else
2500 conn->link_mode |= HCI_LM_MASTER;
2501 }
2503 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2505 hci_role_switch_cfm(conn, ev->status, ev->role);
2506 }
2508 hci_dev_unlock(hdev);
2509 }
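/* Number of Completed Packets event (packet-based flow control): credit the
 * per-connection and per-link-type transmit counters for every reported
 * handle, then schedule the TX work to send more data. */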
2511 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2513 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2514 int i;
2516 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2517 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2518 return;
2521 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2522 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2523 BT_DBG("%s bad parameters", hdev->name);
2524 return;
2527 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2529 for (i = 0; i < ev->num_hndl; i++) {
2530 struct hci_comp_pkts_info *info = &ev->handles[i];
2531 struct hci_conn *conn;
2532 __u16 handle, count;
2534 handle = __le16_to_cpu(info->handle);
2535 count = __le16_to_cpu(info->count);
2537 conn = hci_conn_hash_lookup_handle(hdev, handle);
2538 if (!conn)
2539 continue;
2541 conn->sent -= count;
2543 switch (conn->type) {
2544 case ACL_LINK:
2545 hdev->acl_cnt += count;
2546 if (hdev->acl_cnt > hdev->acl_pkts)
2547 hdev->acl_cnt = hdev->acl_pkts;
2548 break;
2550 case LE_LINK:
2551 if (hdev->le_pkts) {
2552 hdev->le_cnt += count;
2553 if (hdev->le_cnt > hdev->le_pkts)
2554 hdev->le_cnt = hdev->le_pkts;
2555 } else {
2556 hdev->acl_cnt += count;
2557 if (hdev->acl_cnt > hdev->acl_pkts)
2558 hdev->acl_cnt = hdev->acl_pkts;
2560 break;
2562 case SCO_LINK:
2563 hdev->sco_cnt += count;
2564 if (hdev->sco_cnt > hdev->sco_pkts)
2565 hdev->sco_cnt = hdev->sco_pkts;
2566 break;
2568 default:
2569 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2570 break;
2571 }
2572 }
2574 queue_work(hdev->workqueue, &hdev->tx_work);
2575 }
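/* Number of Completed Data Blocks event (block-based flow control): return
 * the reported blocks to the controller's block pool for each handle and
 * schedule the TX work. */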
2577 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2579 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2580 int i;
2582 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2583 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2584 return;
2587 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2588 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2589 BT_DBG("%s bad parameters", hdev->name);
2590 return;
2593 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2594 ev->num_hndl);
2596 for (i = 0; i < ev->num_hndl; i++) {
2597 struct hci_comp_blocks_info *info = &ev->handles[i];
2598 struct hci_conn *conn;
2599 __u16 handle, block_count;
2601 handle = __le16_to_cpu(info->handle);
2602 block_count = __le16_to_cpu(info->blocks);
2604 conn = hci_conn_hash_lookup_handle(hdev, handle);
2605 if (!conn)
2606 continue;
2608 conn->sent -= block_count;
2610 switch (conn->type) {
2611 case ACL_LINK:
2612 hdev->block_cnt += block_count;
2613 if (hdev->block_cnt > hdev->num_blocks)
2614 hdev->block_cnt = hdev->num_blocks;
2615 break;
2617 default:
2618 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2619 break;
2620 }
2621 }
2623 queue_work(hdev->workqueue, &hdev->tx_work);
2624 }
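/* Mode Change event: record the new power mode and interval, update the
 * power-save flag unless a mode change we requested is still pending, and
 * finish any deferred SCO setup. */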
2626 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 struct hci_ev_mode_change *ev = (void *) skb->data;
2629 struct hci_conn *conn;
2631 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2633 hci_dev_lock(hdev);
2635 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2636 if (conn) {
2637 conn->mode = ev->mode;
2638 conn->interval = __le16_to_cpu(ev->interval);
2640 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2641 &conn->flags)) {
2642 if (conn->mode == HCI_CM_ACTIVE)
2643 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2644 else
2645 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2648 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2649 hci_sco_setup(conn, ev->status);
2650 }
2652 hci_dev_unlock(hdev);
2653 }
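/* PIN Code Request event: extend the disconnect timeout for the pairing,
 * auto-reject the request when the adapter is not pairable, and otherwise
 * forward it to user space via mgmt, flagging whether a secure PIN is
 * required. */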
2655 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2657 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2658 struct hci_conn *conn;
2660 BT_DBG("%s", hdev->name);
2662 hci_dev_lock(hdev);
2664 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2665 if (!conn)
2666 goto unlock;
2668 if (conn->state == BT_CONNECTED) {
2669 hci_conn_hold(conn);
2670 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2671 hci_conn_put(conn);
2674 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2675 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2676 sizeof(ev->bdaddr), &ev->bdaddr);
2677 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2678 u8 secure;
2680 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2681 secure = 1;
2682 else
2683 secure = 0;
2685 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2686 }
2688 unlock:
2689 hci_dev_unlock(hdev);
2690 }
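/* Link Key Request event: look up a stored link key for the peer and reply
 * with it, unless it is a debug key (with debug keys disabled) or too weak
 * for the security level pending on the connection, in which case a
 * negative reply is sent. */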
2692 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2694 struct hci_ev_link_key_req *ev = (void *) skb->data;
2695 struct hci_cp_link_key_reply cp;
2696 struct hci_conn *conn;
2697 struct link_key *key;
2699 BT_DBG("%s", hdev->name);
2701 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2702 return;
2704 hci_dev_lock(hdev);
2706 key = hci_find_link_key(hdev, &ev->bdaddr);
2707 if (!key) {
2708 BT_DBG("%s link key not found for %s", hdev->name,
2709 batostr(&ev->bdaddr));
2710 goto not_found;
2713 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2714 batostr(&ev->bdaddr));
2716 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2717 key->type == HCI_LK_DEBUG_COMBINATION) {
2718 BT_DBG("%s ignoring debug key", hdev->name);
2719 goto not_found;
2722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2723 if (conn) {
2724 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2725 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2726 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2727 goto not_found;
2730 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2731 conn->pending_sec_level == BT_SECURITY_HIGH) {
2732 BT_DBG("%s ignoring key unauthenticated for high security",
2733 hdev->name);
2734 goto not_found;
2737 conn->key_type = key->type;
2738 conn->pin_length = key->pin_len;
2741 bacpy(&cp.bdaddr, &ev->bdaddr);
2742 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2744 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2746 hci_dev_unlock(hdev);
2748 return;
2750 not_found:
2751 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2752 hci_dev_unlock(hdev);
2753 }
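/* Link Key Notification event: record the new key type on the connection
 * (unless it is just a changed combination key) and, when link key storage
 * is enabled, persist the key via hci_add_link_key(). */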
2755 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2757 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2758 struct hci_conn *conn;
2759 u8 pin_len = 0;
2761 BT_DBG("%s", hdev->name);
2763 hci_dev_lock(hdev);
2765 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2766 if (conn) {
2767 hci_conn_hold(conn);
2768 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2769 pin_len = conn->pin_length;
2771 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2772 conn->key_type = ev->key_type;
2774 hci_conn_put(conn);
2777 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2778 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2779 ev->key_type, pin_len);
2781 hci_dev_unlock(hdev);
2784 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2786 struct hci_ev_clock_offset *ev = (void *) skb->data;
2787 struct hci_conn *conn;
2789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2791 hci_dev_lock(hdev);
2793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2794 if (conn && !ev->status) {
2795 struct inquiry_entry *ie;
2797 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2798 if (ie) {
2799 ie->data.clock_offset = ev->clock_offset;
2800 ie->timestamp = jiffies;
2804 hci_dev_unlock(hdev);
2807 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2809 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2810 struct hci_conn *conn;
2812 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2814 hci_dev_lock(hdev);
2816 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2817 if (conn && !ev->status)
2818 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2820 hci_dev_unlock(hdev);
2823 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2825 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2826 struct inquiry_entry *ie;
2828 BT_DBG("%s", hdev->name);
2830 hci_dev_lock(hdev);
2832 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2833 if (ie) {
2834 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2835 ie->timestamp = jiffies;
2836 }
2838 hci_dev_unlock(hdev);
2839 }
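/* Inquiry Result with RSSI event: parse both response formats (with and
 * without the page scan mode field), update the inquiry cache and report
 * each found device to mgmt. */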
2841 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2842 struct sk_buff *skb)
2844 struct inquiry_data data;
2845 int num_rsp = *((__u8 *) skb->data);
2846 bool name_known, ssp;
2848 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2850 if (!num_rsp)
2851 return;
2853 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2854 return;
2856 hci_dev_lock(hdev);
2858 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2859 struct inquiry_info_with_rssi_and_pscan_mode *info;
2860 info = (void *) (skb->data + 1);
2862 for (; num_rsp; num_rsp--, info++) {
2863 bacpy(&data.bdaddr, &info->bdaddr);
2864 data.pscan_rep_mode = info->pscan_rep_mode;
2865 data.pscan_period_mode = info->pscan_period_mode;
2866 data.pscan_mode = info->pscan_mode;
2867 memcpy(data.dev_class, info->dev_class, 3);
2868 data.clock_offset = info->clock_offset;
2869 data.rssi = info->rssi;
2870 data.ssp_mode = 0x00;
2872 name_known = hci_inquiry_cache_update(hdev, &data,
2873 false, &ssp);
2874 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2875 info->dev_class, info->rssi,
2876 !name_known, ssp, NULL, 0);
2878 } else {
2879 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2881 for (; num_rsp; num_rsp--, info++) {
2882 bacpy(&data.bdaddr, &info->bdaddr);
2883 data.pscan_rep_mode = info->pscan_rep_mode;
2884 data.pscan_period_mode = info->pscan_period_mode;
2885 data.pscan_mode = 0x00;
2886 memcpy(data.dev_class, info->dev_class, 3);
2887 data.clock_offset = info->clock_offset;
2888 data.rssi = info->rssi;
2889 data.ssp_mode = 0x00;
2890 name_known = hci_inquiry_cache_update(hdev, &data,
2891 false, &ssp);
2892 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2893 info->dev_class, info->rssi,
2894 !name_known, ssp, NULL, 0);
2895 }
2896 }
2898 hci_dev_unlock(hdev);
2899 }
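/* Remote Extended Features event: page 1 carries the remote host's SSP
 * support, which is noted in the inquiry cache and on the connection;
 * afterwards connection setup continues as in hci_remote_features_evt(). */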
2901 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2902 struct sk_buff *skb)
2904 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2905 struct hci_conn *conn;
2907 BT_DBG("%s", hdev->name);
2909 hci_dev_lock(hdev);
2911 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2912 if (!conn)
2913 goto unlock;
2915 if (!ev->status && ev->page == 0x01) {
2916 struct inquiry_entry *ie;
2918 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2919 if (ie)
2920 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2922 if (ev->features[0] & LMP_HOST_SSP)
2923 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2926 if (conn->state != BT_CONFIG)
2927 goto unlock;
2929 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2930 struct hci_cp_remote_name_req cp;
2931 memset(&cp, 0, sizeof(cp));
2932 bacpy(&cp.bdaddr, &conn->dst);
2933 cp.pscan_rep_mode = 0x02;
2934 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2935 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2936 mgmt_device_connected(hdev, &conn->dst, conn->type,
2937 conn->dst_type, 0, NULL, 0,
2938 conn->dev_class);
2940 if (!hci_outgoing_auth_needed(hdev, conn)) {
2941 conn->state = BT_CONNECTED;
2942 hci_proto_connect_cfm(conn, ev->status);
2943 hci_conn_put(conn);
2944 }
2946 unlock:
2947 hci_dev_unlock(hdev);
2948 }
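/* Synchronous Connection Complete event: finalize an (e)SCO link on
 * success; on a handful of negotiation errors retry the setup once with an
 * adjusted packet type, otherwise tear the connection down. */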
2950 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2951 struct sk_buff *skb)
2953 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2954 struct hci_conn *conn;
2956 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2958 hci_dev_lock(hdev);
2960 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2961 if (!conn) {
2962 if (ev->link_type == ESCO_LINK)
2963 goto unlock;
2965 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2966 if (!conn)
2967 goto unlock;
2969 conn->type = SCO_LINK;
2972 switch (ev->status) {
2973 case 0x00:
2974 conn->handle = __le16_to_cpu(ev->handle);
2975 conn->state = BT_CONNECTED;
2977 hci_conn_hold_device(conn);
2978 hci_conn_add_sysfs(conn);
2979 break;
2981 case 0x11: /* Unsupported Feature or Parameter Value */
2982 case 0x1c: /* SCO interval rejected */
2983 case 0x1a: /* Unsupported Remote Feature */
2984 case 0x1f: /* Unspecified error */
2985 if (conn->out && conn->attempt < 2) {
2986 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2987 (hdev->esco_type & EDR_ESCO_MASK);
2988 hci_setup_sync(conn, conn->link->handle);
2989 goto unlock;
2991 /* fall through */
2993 default:
2994 conn->state = BT_CLOSED;
2995 break;
2998 hci_proto_connect_cfm(conn, ev->status);
2999 if (ev->status)
3000 hci_conn_del(conn);
3002 unlock:
3003 hci_dev_unlock(hdev);
3006 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3008 BT_DBG("%s", hdev->name);
3011 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3013 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3015 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3018 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3019 struct sk_buff *skb)
3021 struct inquiry_data data;
3022 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3023 int num_rsp = *((__u8 *) skb->data);
3024 size_t eir_len;
3026 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3028 if (!num_rsp)
3029 return;
3031 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3032 return;
3034 hci_dev_lock(hdev);
3036 for (; num_rsp; num_rsp--, info++) {
3037 bool name_known, ssp;
3039 bacpy(&data.bdaddr, &info->bdaddr);
3040 data.pscan_rep_mode = info->pscan_rep_mode;
3041 data.pscan_period_mode = info->pscan_period_mode;
3042 data.pscan_mode = 0x00;
3043 memcpy(data.dev_class, info->dev_class, 3);
3044 data.clock_offset = info->clock_offset;
3045 data.rssi = info->rssi;
3046 data.ssp_mode = 0x01;
3048 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3049 name_known = eir_has_data_type(info->data,
3050 sizeof(info->data),
3051 EIR_NAME_COMPLETE);
3052 else
3053 name_known = true;
3055 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3056 &ssp);
3057 eir_len = eir_get_length(info->data, sizeof(info->data));
3058 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3059 info->dev_class, info->rssi, !name_known,
3060 ssp, info->data, eir_len);
3063 hci_dev_unlock(hdev);
3066 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3067 struct sk_buff *skb)
3069 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3070 struct hci_conn *conn;
3072 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3073 __le16_to_cpu(ev->handle));
3075 hci_dev_lock(hdev);
3077 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3078 if (!conn)
3079 goto unlock;
3081 if (!ev->status)
3082 conn->sec_level = conn->pending_sec_level;
3084 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3086 if (ev->status && conn->state == BT_CONNECTED) {
3087 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3088 hci_conn_put(conn);
3089 goto unlock;
3092 if (conn->state == BT_CONFIG) {
3093 if (!ev->status)
3094 conn->state = BT_CONNECTED;
3096 hci_proto_connect_cfm(conn, ev->status);
3097 hci_conn_put(conn);
3098 } else {
3099 hci_auth_cfm(conn, ev->status);
3101 hci_conn_hold(conn);
3102 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3103 hci_conn_put(conn);
3106 unlock:
3107 hci_dev_unlock(hdev);
3110 static u8 hci_get_auth_req(struct hci_conn *conn)
3112 /* If remote requests dedicated bonding follow that lead */
3113 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3114 /* If both the remote and local IO capabilities allow MITM
3115 * protection, require it; otherwise don't. */
3116 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3117 return 0x02;
3118 else
3119 return 0x03;
3122 /* If remote requests no-bonding follow that lead */
3123 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3124 return conn->remote_auth | (conn->auth_type & 0x01);
3126 return conn->auth_type;
3127 }
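/* IO Capability Request event: reply with our IO capability, the derived
 * authentication requirements and OOB data availability when pairing is
 * allowed; otherwise send a negative reply with "pairing not allowed". */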
3129 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3131 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3132 struct hci_conn *conn;
3134 BT_DBG("%s", hdev->name);
3136 hci_dev_lock(hdev);
3138 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3139 if (!conn)
3140 goto unlock;
3142 hci_conn_hold(conn);
3144 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3145 goto unlock;
3147 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3148 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3149 struct hci_cp_io_capability_reply cp;
3151 bacpy(&cp.bdaddr, &ev->bdaddr);
3152 /* Map a KeyboardDisplay IO capability to DisplayYesNo, since
3153 * KeyboardDisplay is not a valid value in the BR/EDR IO Capability Reply. */
3154 cp.capability = (conn->io_capability == 0x04) ?
3155 0x01 : conn->io_capability;
3156 conn->auth_type = hci_get_auth_req(conn);
3157 cp.authentication = conn->auth_type;
3159 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3160 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3161 cp.oob_data = 0x01;
3162 else
3163 cp.oob_data = 0x00;
3165 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3166 sizeof(cp), &cp);
3167 } else {
3168 struct hci_cp_io_capability_neg_reply cp;
3170 bacpy(&cp.bdaddr, &ev->bdaddr);
3171 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3173 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3174 sizeof(cp), &cp);
3177 unlock:
3178 hci_dev_unlock(hdev);
3181 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3183 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3184 struct hci_conn *conn;
3186 BT_DBG("%s", hdev->name);
3188 hci_dev_lock(hdev);
3190 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3191 if (!conn)
3192 goto unlock;
3194 conn->remote_cap = ev->capability;
3195 conn->remote_auth = ev->authentication;
3196 if (ev->oob_data)
3197 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3199 unlock:
3200 hci_dev_unlock(hdev);
3203 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3204 struct sk_buff *skb)
3206 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3207 int loc_mitm, rem_mitm, confirm_hint = 0;
3208 struct hci_conn *conn;
3210 BT_DBG("%s", hdev->name);
3212 hci_dev_lock(hdev);
3214 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3215 goto unlock;
3217 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3218 if (!conn)
3219 goto unlock;
3221 loc_mitm = (conn->auth_type & 0x01);
3222 rem_mitm = (conn->remote_auth & 0x01);
3224 /* If we require MITM but the remote device can't provide that
3225 * (it has NoInputNoOutput) then reject the confirmation
3226 * request. The only exception is when we're dedicated bonding
3227 * initiators (connect_cfm_cb set) since then we always have the MITM
3228 * bit set. */
3229 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3230 BT_DBG("Rejecting request: remote device can't provide MITM");
3231 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3232 sizeof(ev->bdaddr), &ev->bdaddr);
3233 goto unlock;
3236 /* If neither side requires MITM protection, auto-accept */
3237 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3238 (!rem_mitm || conn->io_capability == 0x03)) {
3240 /* If we're not the initiators request authorization to
3241 * proceed from user space (mgmt_user_confirm with
3242 * confirm_hint set to 1). */
3243 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3244 BT_DBG("Confirming auto-accept as acceptor");
3245 confirm_hint = 1;
3246 goto confirm;
3249 BT_DBG("Auto-accept of user confirmation with %ums delay",
3250 hdev->auto_accept_delay);
3252 if (hdev->auto_accept_delay > 0) {
3253 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3254 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3255 goto unlock;
3258 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3259 sizeof(ev->bdaddr), &ev->bdaddr);
3260 goto unlock;
3263 confirm:
3264 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3265 confirm_hint);
3267 unlock:
3268 hci_dev_unlock(hdev);
3271 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3272 struct sk_buff *skb)
3274 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3276 BT_DBG("%s", hdev->name);
3278 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3279 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3282 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3283 struct sk_buff *skb)
3285 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3286 struct hci_conn *conn;
3288 BT_DBG("%s", hdev->name);
3290 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3291 if (!conn)
3292 return;
3294 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3295 conn->passkey_entered = 0;
3297 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3298 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3299 conn->dst_type, conn->passkey_notify,
3300 conn->passkey_entered);
3303 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3305 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3306 struct hci_conn *conn;
3308 BT_DBG("%s", hdev->name);
3310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3311 if (!conn)
3312 return;
3314 switch (ev->type) {
3315 case HCI_KEYPRESS_STARTED:
3316 conn->passkey_entered = 0;
3317 return;
3319 case HCI_KEYPRESS_ENTERED:
3320 conn->passkey_entered++;
3321 break;
3323 case HCI_KEYPRESS_ERASED:
3324 conn->passkey_entered--;
3325 break;
3327 case HCI_KEYPRESS_CLEARED:
3328 conn->passkey_entered = 0;
3329 break;
3331 case HCI_KEYPRESS_COMPLETED:
3332 return;
3335 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3336 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3337 conn->dst_type, conn->passkey_notify,
3338 conn->passkey_entered);
3341 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3342 struct sk_buff *skb)
3344 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3345 struct hci_conn *conn;
3347 BT_DBG("%s", hdev->name);
3349 hci_dev_lock(hdev);
3351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3352 if (!conn)
3353 goto unlock;
3355 /* To avoid duplicate auth_failed events to user space we check
3356 * the HCI_CONN_AUTH_PEND flag, which is set when we initiated
3357 * the authentication. As initiator a traditional Auth Complete
3358 * event is always produced as well, and that one is already
3359 * mapped to the mgmt_auth_failed event. */
3360 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3361 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3362 ev->status);
3364 hci_conn_put(conn);
3366 unlock:
3367 hci_dev_unlock(hdev);
3370 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3371 struct sk_buff *skb)
3373 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3374 struct inquiry_entry *ie;
3376 BT_DBG("%s", hdev->name);
3378 hci_dev_lock(hdev);
3380 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3381 if (ie)
3382 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3384 hci_dev_unlock(hdev);
3387 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3388 struct sk_buff *skb)
3390 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3391 struct oob_data *data;
3393 BT_DBG("%s", hdev->name);
3395 hci_dev_lock(hdev);
3397 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3398 goto unlock;
3400 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3401 if (data) {
3402 struct hci_cp_remote_oob_data_reply cp;
3404 bacpy(&cp.bdaddr, &ev->bdaddr);
3405 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3406 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3408 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3409 &cp);
3410 } else {
3411 struct hci_cp_remote_oob_data_neg_reply cp;
3413 bacpy(&cp.bdaddr, &ev->bdaddr);
3414 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3415 &cp);
3418 unlock:
3419 hci_dev_unlock(hdev);
3420 }
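/* LE Connection Complete event: attach the new LE link to the connection
 * object created for an outgoing connect request (or allocate one for an
 * incoming link), report success or failure to mgmt and complete the
 * connection. */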
3422 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3424 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3425 struct hci_conn *conn;
3427 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3429 hci_dev_lock(hdev);
3431 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3432 if (!conn) {
3433 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3434 if (!conn) {
3435 BT_ERR("No memory for new connection");
3436 goto unlock;
3439 conn->dst_type = ev->bdaddr_type;
3441 if (ev->role == LE_CONN_ROLE_MASTER) {
3442 conn->out = true;
3443 conn->link_mode |= HCI_LM_MASTER;
3447 if (ev->status) {
3448 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3449 conn->dst_type, ev->status);
3450 hci_proto_connect_cfm(conn, ev->status);
3451 conn->state = BT_CLOSED;
3452 hci_conn_del(conn);
3453 goto unlock;
3456 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3457 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3458 conn->dst_type, 0, NULL, 0, NULL);
3460 conn->sec_level = BT_SECURITY_LOW;
3461 conn->handle = __le16_to_cpu(ev->handle);
3462 conn->state = BT_CONNECTED;
3464 hci_conn_hold_device(conn);
3465 hci_conn_add_sysfs(conn);
3467 hci_proto_connect_cfm(conn, ev->status);
3469 unlock:
3470 hci_dev_unlock(hdev);
3473 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3475 u8 num_reports = skb->data[0];
3476 void *ptr = &skb->data[1];
3477 s8 rssi;
3479 hci_dev_lock(hdev);
3481 while (num_reports--) {
3482 struct hci_ev_le_advertising_info *ev = ptr;
3484 rssi = ev->data[ev->length];
3485 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3486 NULL, rssi, 0, 1, ev->data, ev->length);
3488 ptr += sizeof(*ev) + ev->length + 1;
3489 }
3491 hci_dev_unlock(hdev);
3492 }
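/* LE Long Term Key Request event: look up the LTK matching the ediv and
 * random value and hand it to the controller, removing one-time STKs after
 * use; reply negatively when no key is known. */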
3494 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3496 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3497 struct hci_cp_le_ltk_reply cp;
3498 struct hci_cp_le_ltk_neg_reply neg;
3499 struct hci_conn *conn;
3500 struct smp_ltk *ltk;
3502 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3504 hci_dev_lock(hdev);
3506 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3507 if (conn == NULL)
3508 goto not_found;
3510 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3511 if (ltk == NULL)
3512 goto not_found;
3514 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3515 cp.handle = cpu_to_le16(conn->handle);
3517 if (ltk->authenticated)
3518 conn->sec_level = BT_SECURITY_HIGH;
3520 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3522 if (ltk->type & HCI_SMP_STK) {
3523 list_del(&ltk->list);
3524 kfree(ltk);
3527 hci_dev_unlock(hdev);
3529 return;
3531 not_found:
3532 neg.handle = ev->handle;
3533 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3534 hci_dev_unlock(hdev);
3537 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3539 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3541 skb_pull(skb, sizeof(*le_ev));
3543 switch (le_ev->subevent) {
3544 case HCI_EV_LE_CONN_COMPLETE:
3545 hci_le_conn_complete_evt(hdev, skb);
3546 break;
3548 case HCI_EV_LE_ADVERTISING_REPORT:
3549 hci_le_adv_report_evt(hdev, skb);
3550 break;
3552 case HCI_EV_LE_LTK_REQ:
3553 hci_le_ltk_request_evt(hdev, skb);
3554 break;
3556 default:
3557 break;
3558 }
3559 }
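/* Entry point for incoming HCI event packets: strip the event header and
 * dispatch to the handler for the event code, then free the skb and update
 * the RX statistics. */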
3561 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3563 struct hci_event_hdr *hdr = (void *) skb->data;
3564 __u8 event = hdr->evt;
3566 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3568 switch (event) {
3569 case HCI_EV_INQUIRY_COMPLETE:
3570 hci_inquiry_complete_evt(hdev, skb);
3571 break;
3573 case HCI_EV_INQUIRY_RESULT:
3574 hci_inquiry_result_evt(hdev, skb);
3575 break;
3577 case HCI_EV_CONN_COMPLETE:
3578 hci_conn_complete_evt(hdev, skb);
3579 break;
3581 case HCI_EV_CONN_REQUEST:
3582 hci_conn_request_evt(hdev, skb);
3583 break;
3585 case HCI_EV_DISCONN_COMPLETE:
3586 hci_disconn_complete_evt(hdev, skb);
3587 break;
3589 case HCI_EV_AUTH_COMPLETE:
3590 hci_auth_complete_evt(hdev, skb);
3591 break;
3593 case HCI_EV_REMOTE_NAME:
3594 hci_remote_name_evt(hdev, skb);
3595 break;
3597 case HCI_EV_ENCRYPT_CHANGE:
3598 hci_encrypt_change_evt(hdev, skb);
3599 break;
3601 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3602 hci_change_link_key_complete_evt(hdev, skb);
3603 break;
3605 case HCI_EV_REMOTE_FEATURES:
3606 hci_remote_features_evt(hdev, skb);
3607 break;
3609 case HCI_EV_REMOTE_VERSION:
3610 hci_remote_version_evt(hdev, skb);
3611 break;
3613 case HCI_EV_QOS_SETUP_COMPLETE:
3614 hci_qos_setup_complete_evt(hdev, skb);
3615 break;
3617 case HCI_EV_CMD_COMPLETE:
3618 hci_cmd_complete_evt(hdev, skb);
3619 break;
3621 case HCI_EV_CMD_STATUS:
3622 hci_cmd_status_evt(hdev, skb);
3623 break;
3625 case HCI_EV_ROLE_CHANGE:
3626 hci_role_change_evt(hdev, skb);
3627 break;
3629 case HCI_EV_NUM_COMP_PKTS:
3630 hci_num_comp_pkts_evt(hdev, skb);
3631 break;
3633 case HCI_EV_MODE_CHANGE:
3634 hci_mode_change_evt(hdev, skb);
3635 break;
3637 case HCI_EV_PIN_CODE_REQ:
3638 hci_pin_code_request_evt(hdev, skb);
3639 break;
3641 case HCI_EV_LINK_KEY_REQ:
3642 hci_link_key_request_evt(hdev, skb);
3643 break;
3645 case HCI_EV_LINK_KEY_NOTIFY:
3646 hci_link_key_notify_evt(hdev, skb);
3647 break;
3649 case HCI_EV_CLOCK_OFFSET:
3650 hci_clock_offset_evt(hdev, skb);
3651 break;
3653 case HCI_EV_PKT_TYPE_CHANGE:
3654 hci_pkt_type_change_evt(hdev, skb);
3655 break;
3657 case HCI_EV_PSCAN_REP_MODE:
3658 hci_pscan_rep_mode_evt(hdev, skb);
3659 break;
3661 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3662 hci_inquiry_result_with_rssi_evt(hdev, skb);
3663 break;
3665 case HCI_EV_REMOTE_EXT_FEATURES:
3666 hci_remote_ext_features_evt(hdev, skb);
3667 break;
3669 case HCI_EV_SYNC_CONN_COMPLETE:
3670 hci_sync_conn_complete_evt(hdev, skb);
3671 break;
3673 case HCI_EV_SYNC_CONN_CHANGED:
3674 hci_sync_conn_changed_evt(hdev, skb);
3675 break;
3677 case HCI_EV_SNIFF_SUBRATE:
3678 hci_sniff_subrate_evt(hdev, skb);
3679 break;
3681 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3682 hci_extended_inquiry_result_evt(hdev, skb);
3683 break;
3685 case HCI_EV_KEY_REFRESH_COMPLETE:
3686 hci_key_refresh_complete_evt(hdev, skb);
3687 break;
3689 case HCI_EV_IO_CAPA_REQUEST:
3690 hci_io_capa_request_evt(hdev, skb);
3691 break;
3693 case HCI_EV_IO_CAPA_REPLY:
3694 hci_io_capa_reply_evt(hdev, skb);
3695 break;
3697 case HCI_EV_USER_CONFIRM_REQUEST:
3698 hci_user_confirm_request_evt(hdev, skb);
3699 break;
3701 case HCI_EV_USER_PASSKEY_REQUEST:
3702 hci_user_passkey_request_evt(hdev, skb);
3703 break;
3705 case HCI_EV_USER_PASSKEY_NOTIFY:
3706 hci_user_passkey_notify_evt(hdev, skb);
3707 break;
3709 case HCI_EV_KEYPRESS_NOTIFY:
3710 hci_keypress_notify_evt(hdev, skb);
3711 break;
3713 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3714 hci_simple_pair_complete_evt(hdev, skb);
3715 break;
3717 case HCI_EV_REMOTE_HOST_FEATURES:
3718 hci_remote_host_features_evt(hdev, skb);
3719 break;
3721 case HCI_EV_LE_META:
3722 hci_le_meta_evt(hdev, skb);
3723 break;
3725 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3726 hci_remote_oob_data_request_evt(hdev, skb);
3727 break;
3729 case HCI_EV_NUM_COMP_BLOCKS:
3730 hci_num_comp_blocks_evt(hdev, skb);
3731 break;
3733 default:
3734 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3735 break;
3736 }
3738 kfree_skb(skb);
3739 hdev->stat.evt_rx++;