/* net/bluetooth/hci_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
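/*
 * Illustrative sketch, not part of hci_core.c: hci_dev_get() returns the
 * device with its reference count raised, so every successful lookup must
 * be balanced with hci_dev_put().  This is the pattern the ioctl helpers
 * further down in this file follow; example_with_hdev() is a hypothetical
 * caller added here only to show the pairing.
 */
static int example_with_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	/* ... use hdev while holding the reference ... */

	hci_dev_put(hdev);
	return 0;
}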
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
150 int old_state = hdev->discovery.state;
152 if (old_state == state)
153 return;
155 hdev->discovery.state = state;
157 switch (state) {
158 case DISCOVERY_STOPPED:
159 hci_update_passive_scan(hdev);
161 if (old_state != DISCOVERY_STARTING)
162 mgmt_discovering(hdev, 0);
163 break;
164 case DISCOVERY_STARTING:
165 break;
166 case DISCOVERY_FINDING:
167 /* If discovery was not started then it was initiated by the
168 * MGMT interface so no MGMT event shall be generated either
170 if (old_state != DISCOVERY_STARTING) {
171 hdev->discovery.state = old_state;
172 return;
174 mgmt_discovering(hdev, 1);
175 break;
176 case DISCOVERY_RESOLVING:
177 break;
178 case DISCOVERY_STOPPING:
179 break;
182 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
185 void hci_inquiry_cache_flush(struct hci_dev *hdev)
187 struct discovery_state *cache = &hdev->discovery;
188 struct inquiry_entry *p, *n;
190 list_for_each_entry_safe(p, n, &cache->all, all) {
191 list_del(&p->all);
192 kfree(p);
195 INIT_LIST_HEAD(&cache->unknown);
196 INIT_LIST_HEAD(&cache->resolve);
199 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
200 bdaddr_t *bdaddr)
202 struct discovery_state *cache = &hdev->discovery;
203 struct inquiry_entry *e;
205 BT_DBG("cache %p, %pMR", cache, bdaddr);
207 list_for_each_entry(e, &cache->all, all) {
208 if (!bacmp(&e->data.bdaddr, bdaddr))
209 return e;
212 return NULL;
215 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
216 bdaddr_t *bdaddr)
218 struct discovery_state *cache = &hdev->discovery;
219 struct inquiry_entry *e;
221 BT_DBG("cache %p, %pMR", cache, bdaddr);
223 list_for_each_entry(e, &cache->unknown, list) {
224 if (!bacmp(&e->data.bdaddr, bdaddr))
225 return e;
228 return NULL;
231 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
232 bdaddr_t *bdaddr,
233 int state)
235 struct discovery_state *cache = &hdev->discovery;
236 struct inquiry_entry *e;
238 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
240 list_for_each_entry(e, &cache->resolve, list) {
241 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
242 return e;
243 if (!bacmp(&e->data.bdaddr, bdaddr))
244 return e;
247 return NULL;
250 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
251 struct inquiry_entry *ie)
253 struct discovery_state *cache = &hdev->discovery;
254 struct list_head *pos = &cache->resolve;
255 struct inquiry_entry *p;
257 list_del(&ie->list);
259 list_for_each_entry(p, &cache->resolve, list) {
260 if (p->name_state != NAME_PENDING &&
261 abs(p->data.rssi) >= abs(ie->data.rssi))
262 break;
263 pos = &p->list;
266 list_add(&ie->list, pos);
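/*
 * Illustrative sketch, not part of hci_core.c: the resolve list managed by
 * hci_inquiry_cache_update_resolve() above is kept ordered so that entries
 * with the strongest signal (|RSSI| closest to zero) get their names
 * resolved first, while entries already in NAME_PENDING keep their slot.
 * The standalone helper below restates that comparison with simplified,
 * hypothetical types.
 */
struct example_resolve_entry {
	int name_state;	/* e.g. NAME_PENDING vs. NAME_NEEDED */
	int rssi;	/* negative dBm; closer to 0 means stronger */
};

/* True if the new entry should be placed before "cur" in the resolve list. */
static int example_resolves_before(const struct example_resolve_entry *cur,
				   const struct example_resolve_entry *new_entry,
				   int name_pending)
{
	int cur_abs = cur->rssi < 0 ? -cur->rssi : cur->rssi;
	int new_abs = new_entry->rssi < 0 ? -new_entry->rssi : new_entry->rssi;

	return cur->name_state != name_pending && cur_abs >= new_abs;
}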
269 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
270 bool name_known)
272 struct discovery_state *cache = &hdev->discovery;
273 struct inquiry_entry *ie;
274 u32 flags = 0;
276 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
278 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
280 if (!data->ssp_mode)
281 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
283 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
284 if (ie) {
285 if (!ie->data.ssp_mode)
286 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
288 if (ie->name_state == NAME_NEEDED &&
289 data->rssi != ie->data.rssi) {
290 ie->data.rssi = data->rssi;
291 hci_inquiry_cache_update_resolve(hdev, ie);
294 goto update;
297 /* Entry not in the cache. Add new one. */
298 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
299 if (!ie) {
300 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
301 goto done;
304 list_add(&ie->all, &cache->all);
306 if (name_known) {
307 ie->name_state = NAME_KNOWN;
308 } else {
309 ie->name_state = NAME_NOT_KNOWN;
310 list_add(&ie->list, &cache->unknown);
313 update:
314 if (name_known && ie->name_state != NAME_KNOWN &&
315 ie->name_state != NAME_PENDING) {
316 ie->name_state = NAME_KNOWN;
317 list_del(&ie->list);
320 memcpy(&ie->data, data, sizeof(*data));
321 ie->timestamp = jiffies;
322 cache->timestamp = jiffies;
324 if (ie->name_state == NAME_NOT_KNOWN)
325 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
327 done:
328 return flags;
331 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
333 struct discovery_state *cache = &hdev->discovery;
334 struct inquiry_info *info = (struct inquiry_info *) buf;
335 struct inquiry_entry *e;
336 int copied = 0;
338 list_for_each_entry(e, &cache->all, all) {
339 struct inquiry_data *data = &e->data;
341 if (copied >= num)
342 break;
344 bacpy(&info->bdaddr, &data->bdaddr);
345 info->pscan_rep_mode = data->pscan_rep_mode;
346 info->pscan_period_mode = data->pscan_period_mode;
347 info->pscan_mode = data->pscan_mode;
348 memcpy(info->dev_class, data->dev_class, 3);
349 info->clock_offset = data->clock_offset;
351 info++;
352 copied++;
355 BT_DBG("cache %p, copied %d", cache, copied);
356 return copied;
359 static int hci_inq_req(struct hci_request *req, unsigned long opt)
361 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
362 struct hci_dev *hdev = req->hdev;
363 struct hci_cp_inquiry cp;
365 BT_DBG("%s", hdev->name);
367 if (test_bit(HCI_INQUIRY, &hdev->flags))
368 return 0;
370 /* Start Inquiry */
371 memcpy(&cp.lap, &ir->lap, 3);
372 cp.length = ir->length;
373 cp.num_rsp = ir->num_rsp;
374 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
376 return 0;
379 int hci_inquiry(void __user *arg)
381 __u8 __user *ptr = arg;
382 struct hci_inquiry_req ir;
383 struct hci_dev *hdev;
384 int err = 0, do_inquiry = 0, max_rsp;
385 long timeo;
386 __u8 *buf;
388 if (copy_from_user(&ir, ptr, sizeof(ir)))
389 return -EFAULT;
391 hdev = hci_dev_get(ir.dev_id);
392 if (!hdev)
393 return -ENODEV;
395 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
396 err = -EBUSY;
397 goto done;
400 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
401 err = -EOPNOTSUPP;
402 goto done;
405 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
406 err = -EOPNOTSUPP;
407 goto done;
410 /* Restrict maximum inquiry length to 60 seconds */
411 if (ir.length > 60) {
412 err = -EINVAL;
413 goto done;
416 hci_dev_lock(hdev);
417 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
418 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
419 hci_inquiry_cache_flush(hdev);
420 do_inquiry = 1;
422 hci_dev_unlock(hdev);
424 timeo = ir.length * msecs_to_jiffies(2000);
426 if (do_inquiry) {
427 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
428 timeo, NULL);
429 if (err < 0)
430 goto done;
432 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
433 * cleared). If it is interrupted by a signal, return -EINTR.
435 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
436 TASK_INTERRUPTIBLE)) {
437 err = -EINTR;
438 goto done;
442 /* for unlimited number of responses we will use buffer with
443 * 255 entries
445 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
447 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
448 * copy it to the user space.
450 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
451 if (!buf) {
452 err = -ENOMEM;
453 goto done;
456 hci_dev_lock(hdev);
457 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
458 hci_dev_unlock(hdev);
460 BT_DBG("num_rsp %d", ir.num_rsp);
462 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
463 ptr += sizeof(ir);
464 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
465 ir.num_rsp))
466 err = -EFAULT;
467 } else
468 err = -EFAULT;
470 kfree(buf);
472 done:
473 hci_dev_put(hdev);
474 return err;
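/*
 * Illustrative sketch, not part of hci_core.c: the worked numbers behind
 * hci_inquiry() above, assuming a caller that passed ir.length = 8 and
 * ir.num_rsp = 0.  Lengths above 60 are rejected with -EINVAL, the sync
 * request then waits length * 2000 ms, and num_rsp == 0 means "unlimited",
 * which is capped at 255 cache entries for the reply buffer.
 * example_inquiry_budget() is a hypothetical helper, added only to show
 * the arithmetic.
 */
static void example_inquiry_budget(unsigned int length, unsigned int num_rsp,
				   unsigned long *timeout_ms,
				   unsigned int *max_rsp)
{
	*timeout_ms = length * 2000UL;		/* 8 -> 16000 ms */
	*max_rsp = num_rsp ? num_rsp : 255;	/* 0 -> 255 entries */
}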
477 static int hci_dev_do_open(struct hci_dev *hdev)
479 int ret = 0;
481 BT_DBG("%s %p", hdev->name, hdev);
483 hci_req_sync_lock(hdev);
485 ret = hci_dev_open_sync(hdev);
487 hci_req_sync_unlock(hdev);
488 return ret;
491 /* ---- HCI ioctl helpers ---- */
493 int hci_dev_open(__u16 dev)
495 struct hci_dev *hdev;
496 int err;
498 hdev = hci_dev_get(dev);
499 if (!hdev)
500 return -ENODEV;
502 /* Devices that are marked as unconfigured can only be powered
503 * up as user channel. Trying to bring them up as normal devices
504 * will result into a failure. Only user channel operation is
505 * possible.
507 * When this function is called for a user channel, the flag
508 * HCI_USER_CHANNEL will be set first before attempting to
509 * open the device.
511 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
512 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
513 err = -EOPNOTSUPP;
514 goto done;
517 /* We need to ensure that no other power on/off work is pending
518 * before proceeding to call hci_dev_do_open. This is
519 * particularly important if the setup procedure has not yet
520 * completed.
522 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
523 cancel_delayed_work(&hdev->power_off);
525 /* After this call it is guaranteed that the setup procedure
526 * has finished. This means that error conditions like RFKILL
527 * or no valid public or static random address apply.
529 flush_workqueue(hdev->req_workqueue);
531 /* For controllers not using the management interface and that
532 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
533 * so that pairing works for them. Once the management interface
534 * is in use this bit will be cleared again and userspace has
535 * to explicitly enable it.
537 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
538 !hci_dev_test_flag(hdev, HCI_MGMT))
539 hci_dev_set_flag(hdev, HCI_BONDABLE);
541 err = hci_dev_do_open(hdev);
543 done:
544 hci_dev_put(hdev);
545 return err;
548 int hci_dev_do_close(struct hci_dev *hdev)
550 int err;
552 BT_DBG("%s %p", hdev->name, hdev);
554 hci_req_sync_lock(hdev);
556 err = hci_dev_close_sync(hdev);
558 hci_req_sync_unlock(hdev);
560 return err;
563 int hci_dev_close(__u16 dev)
565 struct hci_dev *hdev;
566 int err;
568 hdev = hci_dev_get(dev);
569 if (!hdev)
570 return -ENODEV;
572 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
573 err = -EBUSY;
574 goto done;
577 cancel_work_sync(&hdev->power_on);
578 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
579 cancel_delayed_work(&hdev->power_off);
581 err = hci_dev_do_close(hdev);
583 done:
584 hci_dev_put(hdev);
585 return err;
588 static int hci_dev_do_reset(struct hci_dev *hdev)
590 int ret;
592 BT_DBG("%s %p", hdev->name, hdev);
594 hci_req_sync_lock(hdev);
596 /* Drop queues */
597 skb_queue_purge(&hdev->rx_q);
598 skb_queue_purge(&hdev->cmd_q);
600 /* Cancel these to avoid queueing non-chained pending work */
601 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
602 /* Wait for
604 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
605 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
607 * inside RCU section to see the flag or complete scheduling.
609 synchronize_rcu();
610 /* Explicitly cancel works in case scheduled after setting the flag. */
611 cancel_delayed_work(&hdev->cmd_timer);
612 cancel_delayed_work(&hdev->ncmd_timer);
614 /* Avoid potential lockdep warnings from the *_flush() calls by
615 * ensuring the workqueue is empty up front.
617 drain_workqueue(hdev->workqueue);
619 hci_dev_lock(hdev);
620 hci_inquiry_cache_flush(hdev);
621 hci_conn_hash_flush(hdev);
622 hci_dev_unlock(hdev);
624 if (hdev->flush)
625 hdev->flush(hdev);
627 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
629 atomic_set(&hdev->cmd_cnt, 1);
630 hdev->acl_cnt = 0;
631 hdev->sco_cnt = 0;
632 hdev->le_cnt = 0;
633 hdev->iso_cnt = 0;
635 ret = hci_reset_sync(hdev);
637 hci_req_sync_unlock(hdev);
638 return ret;
641 int hci_dev_reset(__u16 dev)
643 struct hci_dev *hdev;
644 int err;
646 hdev = hci_dev_get(dev);
647 if (!hdev)
648 return -ENODEV;
650 if (!test_bit(HCI_UP, &hdev->flags)) {
651 err = -ENETDOWN;
652 goto done;
655 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
656 err = -EBUSY;
657 goto done;
660 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
661 err = -EOPNOTSUPP;
662 goto done;
665 err = hci_dev_do_reset(hdev);
667 done:
668 hci_dev_put(hdev);
669 return err;
672 int hci_dev_reset_stat(__u16 dev)
674 struct hci_dev *hdev;
675 int ret = 0;
677 hdev = hci_dev_get(dev);
678 if (!hdev)
679 return -ENODEV;
681 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
682 ret = -EBUSY;
683 goto done;
686 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
687 ret = -EOPNOTSUPP;
688 goto done;
691 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
693 done:
694 hci_dev_put(hdev);
695 return ret;
698 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
700 bool conn_changed, discov_changed;
702 BT_DBG("%s scan 0x%02x", hdev->name, scan);
704 if ((scan & SCAN_PAGE))
705 conn_changed = !hci_dev_test_and_set_flag(hdev,
706 HCI_CONNECTABLE);
707 else
708 conn_changed = hci_dev_test_and_clear_flag(hdev,
709 HCI_CONNECTABLE);
711 if ((scan & SCAN_INQUIRY)) {
712 discov_changed = !hci_dev_test_and_set_flag(hdev,
713 HCI_DISCOVERABLE);
714 } else {
715 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
716 discov_changed = hci_dev_test_and_clear_flag(hdev,
717 HCI_DISCOVERABLE);
720 if (!hci_dev_test_flag(hdev, HCI_MGMT))
721 return;
723 if (conn_changed || discov_changed) {
724 /* In case this was disabled through mgmt */
725 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
727 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
728 hci_update_adv_data(hdev, hdev->cur_adv_instance);
730 mgmt_new_settings(hdev);
734 int hci_dev_cmd(unsigned int cmd, void __user *arg)
736 struct hci_dev *hdev;
737 struct hci_dev_req dr;
738 int err = 0;
740 if (copy_from_user(&dr, arg, sizeof(dr)))
741 return -EFAULT;
743 hdev = hci_dev_get(dr.dev_id);
744 if (!hdev)
745 return -ENODEV;
747 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
748 err = -EBUSY;
749 goto done;
752 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
753 err = -EOPNOTSUPP;
754 goto done;
757 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
758 err = -EOPNOTSUPP;
759 goto done;
762 switch (cmd) {
763 case HCISETAUTH:
764 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
765 HCI_INIT_TIMEOUT, NULL);
766 break;
768 case HCISETENCRYPT:
769 if (!lmp_encrypt_capable(hdev)) {
770 err = -EOPNOTSUPP;
771 break;
774 if (!test_bit(HCI_AUTH, &hdev->flags)) {
775 /* Auth must be enabled first */
776 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
777 HCI_INIT_TIMEOUT, NULL);
778 if (err)
779 break;
782 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
783 HCI_INIT_TIMEOUT, NULL);
784 break;
786 case HCISETSCAN:
787 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
788 HCI_INIT_TIMEOUT, NULL);
790 /* Ensure that the connectable and discoverable states
791 * get correctly modified as this was a non-mgmt change.
793 if (!err)
794 hci_update_passive_scan_state(hdev, dr.dev_opt);
795 break;
797 case HCISETLINKPOL:
798 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
799 HCI_INIT_TIMEOUT, NULL);
800 break;
802 case HCISETLINKMODE:
803 hdev->link_mode = ((__u16) dr.dev_opt) &
804 (HCI_LM_MASTER | HCI_LM_ACCEPT);
805 break;
807 case HCISETPTYPE:
808 if (hdev->pkt_type == (__u16) dr.dev_opt)
809 break;
811 hdev->pkt_type = (__u16) dr.dev_opt;
812 mgmt_phy_configuration_changed(hdev, NULL);
813 break;
815 case HCISETACLMTU:
816 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
817 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
818 break;
820 case HCISETSCOMTU:
821 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
822 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
823 break;
825 default:
826 err = -EINVAL;
827 break;
830 done:
831 hci_dev_put(hdev);
832 return err;
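/*
 * Illustrative sketch, not part of hci_core.c: for HCISETACLMTU and
 * HCISETSCOMTU the 32-bit dr.dev_opt carries two 16-bit values, and the
 * pointer arithmetic above reads them straight out of memory, so on a
 * little-endian host the packet count sits in the low half and the MTU in
 * the high half.  The hypothetical helpers below show that layout
 * explicitly.
 */
static unsigned int example_pack_mtu_opt(unsigned short mtu,
					 unsigned short pkts)
{
	return ((unsigned int)mtu << 16) | pkts;	/* little-endian view */
}

static void example_unpack_mtu_opt(unsigned int dev_opt,
				   unsigned short *mtu, unsigned short *pkts)
{
	*pkts = dev_opt & 0xffff;	/* *((__u16 *)&dr.dev_opt + 0) */
	*mtu = dev_opt >> 16;		/* *((__u16 *)&dr.dev_opt + 1) */
}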
835 int hci_get_dev_list(void __user *arg)
837 struct hci_dev *hdev;
838 struct hci_dev_list_req *dl;
839 struct hci_dev_req *dr;
840 int n = 0, size, err;
841 __u16 dev_num;
843 if (get_user(dev_num, (__u16 __user *) arg))
844 return -EFAULT;
846 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
847 return -EINVAL;
849 size = sizeof(*dl) + dev_num * sizeof(*dr);
851 dl = kzalloc(size, GFP_KERNEL);
852 if (!dl)
853 return -ENOMEM;
855 dr = dl->dev_req;
857 read_lock(&hci_dev_list_lock);
858 list_for_each_entry(hdev, &hci_dev_list, list) {
859 unsigned long flags = hdev->flags;
861 /* When the auto-off is configured it means the transport
862 * is running, but in that case still indicate that the
863 * device is actually down.
865 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
866 flags &= ~BIT(HCI_UP);
868 (dr + n)->dev_id = hdev->id;
869 (dr + n)->dev_opt = flags;
871 if (++n >= dev_num)
872 break;
874 read_unlock(&hci_dev_list_lock);
876 dl->dev_num = n;
877 size = sizeof(*dl) + n * sizeof(*dr);
879 err = copy_to_user(arg, dl, size);
880 kfree(dl);
882 return err ? -EFAULT : 0;
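/*
 * Illustrative sketch, not part of hci_core.c: HCIGETDEVLIST works on a
 * variable-length buffer, a device count followed by that many
 * (dev_id, dev_opt) entries, which is why hci_get_dev_list() above sizes
 * the allocation as sizeof(*dl) + dev_num * sizeof(*dr).  The structures
 * below are a simplified, hypothetical restatement of that layout.
 */
struct example_dev_req {
	unsigned short dev_id;
	unsigned int dev_opt;
};

struct example_dev_list_req {
	unsigned short dev_num;
	struct example_dev_req dev_req[];	/* dev_num entries follow */
};

static unsigned long example_dev_list_size(unsigned short dev_num)
{
	return sizeof(struct example_dev_list_req) +
	       dev_num * sizeof(struct example_dev_req);
}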
885 int hci_get_dev_info(void __user *arg)
887 struct hci_dev *hdev;
888 struct hci_dev_info di;
889 unsigned long flags;
890 int err = 0;
892 if (copy_from_user(&di, arg, sizeof(di)))
893 return -EFAULT;
895 hdev = hci_dev_get(di.dev_id);
896 if (!hdev)
897 return -ENODEV;
899 /* When the auto-off is configured it means the transport
900 * is running, but in that case still indicate that the
901 * device is actually down.
903 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
904 flags = hdev->flags & ~BIT(HCI_UP);
905 else
906 flags = hdev->flags;
908 strscpy(di.name, hdev->name, sizeof(di.name));
909 di.bdaddr = hdev->bdaddr;
910 di.type = (hdev->bus & 0x0f);
911 di.flags = flags;
912 di.pkt_type = hdev->pkt_type;
913 if (lmp_bredr_capable(hdev)) {
914 di.acl_mtu = hdev->acl_mtu;
915 di.acl_pkts = hdev->acl_pkts;
916 di.sco_mtu = hdev->sco_mtu;
917 di.sco_pkts = hdev->sco_pkts;
918 } else {
919 di.acl_mtu = hdev->le_mtu;
920 di.acl_pkts = hdev->le_pkts;
921 di.sco_mtu = 0;
922 di.sco_pkts = 0;
924 di.link_policy = hdev->link_policy;
925 di.link_mode = hdev->link_mode;
927 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
928 memcpy(&di.features, &hdev->features, sizeof(di.features));
930 if (copy_to_user(arg, &di, sizeof(di)))
931 err = -EFAULT;
933 hci_dev_put(hdev);
935 return err;
938 /* ---- Interface to HCI drivers ---- */
940 static int hci_dev_do_poweroff(struct hci_dev *hdev)
942 int err;
944 BT_DBG("%s %p", hdev->name, hdev);
946 hci_req_sync_lock(hdev);
948 err = hci_set_powered_sync(hdev, false);
950 hci_req_sync_unlock(hdev);
952 return err;
955 static int hci_rfkill_set_block(void *data, bool blocked)
957 struct hci_dev *hdev = data;
958 int err;
960 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
962 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
963 return -EBUSY;
965 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
966 return 0;
968 if (blocked) {
969 hci_dev_set_flag(hdev, HCI_RFKILLED);
971 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
972 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
973 err = hci_dev_do_poweroff(hdev);
974 if (err) {
975 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
976 err);
978 /* Make sure the device is still closed even if
979 * anything during power off sequence (eg.
980 * disconnecting devices) failed.
982 hci_dev_do_close(hdev);
985 } else {
986 hci_dev_clear_flag(hdev, HCI_RFKILLED);
989 return 0;
992 static const struct rfkill_ops hci_rfkill_ops = {
993 .set_block = hci_rfkill_set_block,
996 static void hci_power_on(struct work_struct *work)
998 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
999 int err;
1001 BT_DBG("%s", hdev->name);
1003 if (test_bit(HCI_UP, &hdev->flags) &&
1004 hci_dev_test_flag(hdev, HCI_MGMT) &&
1005 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1006 cancel_delayed_work(&hdev->power_off);
1007 err = hci_powered_update_sync(hdev);
1008 mgmt_power_on(hdev, err);
1009 return;
1012 err = hci_dev_do_open(hdev);
1013 if (err < 0) {
1014 hci_dev_lock(hdev);
1015 mgmt_set_powered_failed(hdev, err);
1016 hci_dev_unlock(hdev);
1017 return;
1020 /* During the HCI setup phase, a few error conditions are
1021 * ignored and they need to be checked now. If they are still
1022 * valid, it is important to turn the device back off.
1024 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
1025 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
1026 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1027 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1028 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1029 hci_dev_do_close(hdev);
1030 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1031 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1032 HCI_AUTO_OFF_TIMEOUT);
1035 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1036 /* For unconfigured devices, set the HCI_RAW flag
1037 * so that userspace can easily identify them.
1039 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1040 set_bit(HCI_RAW, &hdev->flags);
1042 /* For fully configured devices, this will send
1043 * the Index Added event. For unconfigured devices,
1044 * it will send Unconfigured Index Added event.
1046 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1047 * and no event will be sent.
1049 mgmt_index_added(hdev);
1050 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1051 /* When the controller is now configured, then it
1052 * is important to clear the HCI_RAW flag.
1054 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1055 clear_bit(HCI_RAW, &hdev->flags);
1057 /* Powering on the controller with HCI_CONFIG set only
1058 * happens with the transition from unconfigured to
1059 * configured. This will send the Index Added event.
1061 mgmt_index_added(hdev);
1065 static void hci_power_off(struct work_struct *work)
1067 struct hci_dev *hdev = container_of(work, struct hci_dev,
1068 power_off.work);
1070 BT_DBG("%s", hdev->name);
1072 hci_dev_do_close(hdev);
1075 static void hci_error_reset(struct work_struct *work)
1077 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1079 hci_dev_hold(hdev);
1080 BT_DBG("%s", hdev->name);
1082 if (hdev->hw_error)
1083 hdev->hw_error(hdev, hdev->hw_error_code);
1084 else
1085 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1087 if (!hci_dev_do_close(hdev))
1088 hci_dev_do_open(hdev);
1090 hci_dev_put(hdev);
1093 void hci_uuids_clear(struct hci_dev *hdev)
1095 struct bt_uuid *uuid, *tmp;
1097 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1098 list_del(&uuid->list);
1099 kfree(uuid);
1103 void hci_link_keys_clear(struct hci_dev *hdev)
1105 struct link_key *key, *tmp;
1107 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1108 list_del_rcu(&key->list);
1109 kfree_rcu(key, rcu);
1113 void hci_smp_ltks_clear(struct hci_dev *hdev)
1115 struct smp_ltk *k, *tmp;
1117 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1118 list_del_rcu(&k->list);
1119 kfree_rcu(k, rcu);
1123 void hci_smp_irks_clear(struct hci_dev *hdev)
1125 struct smp_irk *k, *tmp;
1127 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1128 list_del_rcu(&k->list);
1129 kfree_rcu(k, rcu);
1133 void hci_blocked_keys_clear(struct hci_dev *hdev)
1135 struct blocked_key *b, *tmp;
1137 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1138 list_del_rcu(&b->list);
1139 kfree_rcu(b, rcu);
1143 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1145 bool blocked = false;
1146 struct blocked_key *b;
1148 rcu_read_lock();
1149 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1150 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1151 blocked = true;
1152 break;
1156 rcu_read_unlock();
1157 return blocked;
1160 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1162 struct link_key *k;
1164 rcu_read_lock();
1165 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1166 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1167 rcu_read_unlock();
1169 if (hci_is_blocked_key(hdev,
1170 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1171 k->val)) {
1172 bt_dev_warn_ratelimited(hdev,
1173 "Link key blocked for %pMR",
1174 &k->bdaddr);
1175 return NULL;
1178 return k;
1181 rcu_read_unlock();
1183 return NULL;
1186 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1187 u8 key_type, u8 old_key_type)
1189 /* Legacy key */
1190 if (key_type < 0x03)
1191 return true;
1193 /* Debug keys are insecure so don't store them persistently */
1194 if (key_type == HCI_LK_DEBUG_COMBINATION)
1195 return false;
1197 /* Changed combination key and there's no previous one */
1198 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1199 return false;
1201 /* Security mode 3 case */
1202 if (!conn)
1203 return true;
1205 /* BR/EDR key derived using SC from an LE link */
1206 if (conn->type == LE_LINK)
1207 return true;
1209 /* Neither local nor remote side had no-bonding as requirement */
1210 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1211 return true;
1213 /* Local side had dedicated bonding as requirement */
1214 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1215 return true;
1217 /* Remote side had dedicated bonding as requirement */
1218 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1219 return true;
1221 /* If none of the above criteria match, then don't store the key
1222 * persistently */
1223 return false;
1226 static u8 ltk_role(u8 type)
1228 if (type == SMP_LTK)
1229 return HCI_ROLE_MASTER;
1231 return HCI_ROLE_SLAVE;
1234 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1235 u8 addr_type, u8 role)
1237 struct smp_ltk *k;
1239 rcu_read_lock();
1240 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1241 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1242 continue;
1244 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1245 rcu_read_unlock();
1247 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1248 k->val)) {
1249 bt_dev_warn_ratelimited(hdev,
1250 "LTK blocked for %pMR",
1251 &k->bdaddr);
1252 return NULL;
1255 return k;
1258 rcu_read_unlock();
1260 return NULL;
1263 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1265 struct smp_irk *irk_to_return = NULL;
1266 struct smp_irk *irk;
1268 rcu_read_lock();
1269 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1270 if (!bacmp(&irk->rpa, rpa)) {
1271 irk_to_return = irk;
1272 goto done;
1276 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1277 if (smp_irk_matches(hdev, irk->val, rpa)) {
1278 bacpy(&irk->rpa, rpa);
1279 irk_to_return = irk;
1280 goto done;
1284 done:
1285 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1286 irk_to_return->val)) {
1287 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1288 &irk_to_return->bdaddr);
1289 irk_to_return = NULL;
1292 rcu_read_unlock();
1294 return irk_to_return;
1297 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1298 u8 addr_type)
1300 struct smp_irk *irk_to_return = NULL;
1301 struct smp_irk *irk;
1303 /* Identity Address must be public or static random */
1304 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1305 return NULL;
1307 rcu_read_lock();
1308 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1309 if (addr_type == irk->addr_type &&
1310 bacmp(bdaddr, &irk->bdaddr) == 0) {
1311 irk_to_return = irk;
1312 goto done;
1316 done:
1318 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1319 irk_to_return->val)) {
1320 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1321 &irk_to_return->bdaddr);
1322 irk_to_return = NULL;
1325 rcu_read_unlock();
1327 return irk_to_return;
1330 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1331 bdaddr_t *bdaddr, u8 *val, u8 type,
1332 u8 pin_len, bool *persistent)
1334 struct link_key *key, *old_key;
1335 u8 old_key_type;
1337 old_key = hci_find_link_key(hdev, bdaddr);
1338 if (old_key) {
1339 old_key_type = old_key->type;
1340 key = old_key;
1341 } else {
1342 old_key_type = conn ? conn->key_type : 0xff;
1343 key = kzalloc(sizeof(*key), GFP_KERNEL);
1344 if (!key)
1345 return NULL;
1346 list_add_rcu(&key->list, &hdev->link_keys);
1349 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1351 /* Some buggy controller combinations generate a changed
1352 * combination key for legacy pairing even when there's no
1353 * previous key */
1354 if (type == HCI_LK_CHANGED_COMBINATION &&
1355 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1356 type = HCI_LK_COMBINATION;
1357 if (conn)
1358 conn->key_type = type;
1361 bacpy(&key->bdaddr, bdaddr);
1362 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1363 key->pin_len = pin_len;
1365 if (type == HCI_LK_CHANGED_COMBINATION)
1366 key->type = old_key_type;
1367 else
1368 key->type = type;
1370 if (persistent)
1371 *persistent = hci_persistent_key(hdev, conn, type,
1372 old_key_type);
1374 return key;
1377 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1378 u8 addr_type, u8 type, u8 authenticated,
1379 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1381 struct smp_ltk *key, *old_key;
1382 u8 role = ltk_role(type);
1384 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1385 if (old_key)
1386 key = old_key;
1387 else {
1388 key = kzalloc(sizeof(*key), GFP_KERNEL);
1389 if (!key)
1390 return NULL;
1391 list_add_rcu(&key->list, &hdev->long_term_keys);
1394 bacpy(&key->bdaddr, bdaddr);
1395 key->bdaddr_type = addr_type;
1396 memcpy(key->val, tk, sizeof(key->val));
1397 key->authenticated = authenticated;
1398 key->ediv = ediv;
1399 key->rand = rand;
1400 key->enc_size = enc_size;
1401 key->type = type;
1403 return key;
1406 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1407 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1409 struct smp_irk *irk;
1411 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1412 if (!irk) {
1413 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1414 if (!irk)
1415 return NULL;
1417 bacpy(&irk->bdaddr, bdaddr);
1418 irk->addr_type = addr_type;
1420 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1423 memcpy(irk->val, val, 16);
1424 bacpy(&irk->rpa, rpa);
1426 return irk;
1429 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1431 struct link_key *key;
1433 key = hci_find_link_key(hdev, bdaddr);
1434 if (!key)
1435 return -ENOENT;
1437 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1439 list_del_rcu(&key->list);
1440 kfree_rcu(key, rcu);
1442 return 0;
1445 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1447 struct smp_ltk *k, *tmp;
1448 int removed = 0;
1450 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1451 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1452 continue;
1454 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1456 list_del_rcu(&k->list);
1457 kfree_rcu(k, rcu);
1458 removed++;
1461 return removed ? 0 : -ENOENT;
1464 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1466 struct smp_irk *k, *tmp;
1468 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1469 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1470 continue;
1472 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1474 list_del_rcu(&k->list);
1475 kfree_rcu(k, rcu);
1479 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1481 struct smp_ltk *k;
1482 struct smp_irk *irk;
1483 u8 addr_type;
1485 if (type == BDADDR_BREDR) {
1486 if (hci_find_link_key(hdev, bdaddr))
1487 return true;
1488 return false;
1491 /* Convert to HCI addr type which struct smp_ltk uses */
1492 if (type == BDADDR_LE_PUBLIC)
1493 addr_type = ADDR_LE_DEV_PUBLIC;
1494 else
1495 addr_type = ADDR_LE_DEV_RANDOM;
1497 irk = hci_get_irk(hdev, bdaddr, addr_type);
1498 if (irk) {
1499 bdaddr = &irk->bdaddr;
1500 addr_type = irk->addr_type;
1503 rcu_read_lock();
1504 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1505 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1506 rcu_read_unlock();
1507 return true;
1510 rcu_read_unlock();
1512 return false;
1515 /* HCI command timer function */
1516 static void hci_cmd_timeout(struct work_struct *work)
1518 struct hci_dev *hdev = container_of(work, struct hci_dev,
1519 cmd_timer.work);
1521 if (hdev->req_skb) {
1522 u16 opcode = hci_skb_opcode(hdev->req_skb);
1524 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1526 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1527 } else {
1528 bt_dev_err(hdev, "command tx timeout");
1531 if (hdev->cmd_timeout)
1532 hdev->cmd_timeout(hdev);
1534 atomic_set(&hdev->cmd_cnt, 1);
1535 queue_work(hdev->workqueue, &hdev->cmd_work);
1538 /* HCI ncmd timer function */
1539 static void hci_ncmd_timeout(struct work_struct *work)
1541 struct hci_dev *hdev = container_of(work, struct hci_dev,
1542 ncmd_timer.work);
1544 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1546 /* During HCI_INIT phase no events can be injected if the ncmd timer
1547 * triggers since the procedure has its own timeout handling.
1549 if (test_bit(HCI_INIT, &hdev->flags))
1550 return;
1552 /* This is an irrecoverable state, inject hardware error event */
1553 hci_reset_dev(hdev);
1556 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1557 bdaddr_t *bdaddr, u8 bdaddr_type)
1559 struct oob_data *data;
1561 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1562 if (bacmp(bdaddr, &data->bdaddr) != 0)
1563 continue;
1564 if (data->bdaddr_type != bdaddr_type)
1565 continue;
1566 return data;
1569 return NULL;
1572 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1573 u8 bdaddr_type)
1575 struct oob_data *data;
1577 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1578 if (!data)
1579 return -ENOENT;
1581 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1583 list_del(&data->list);
1584 kfree(data);
1586 return 0;
1589 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1591 struct oob_data *data, *n;
1593 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1594 list_del(&data->list);
1595 kfree(data);
1599 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1600 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1601 u8 *hash256, u8 *rand256)
1603 struct oob_data *data;
1605 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1606 if (!data) {
1607 data = kmalloc(sizeof(*data), GFP_KERNEL);
1608 if (!data)
1609 return -ENOMEM;
1611 bacpy(&data->bdaddr, bdaddr);
1612 data->bdaddr_type = bdaddr_type;
1613 list_add(&data->list, &hdev->remote_oob_data);
1616 if (hash192 && rand192) {
1617 memcpy(data->hash192, hash192, sizeof(data->hash192));
1618 memcpy(data->rand192, rand192, sizeof(data->rand192));
1619 if (hash256 && rand256)
1620 data->present = 0x03;
1621 } else {
1622 memset(data->hash192, 0, sizeof(data->hash192));
1623 memset(data->rand192, 0, sizeof(data->rand192));
1624 if (hash256 && rand256)
1625 data->present = 0x02;
1626 else
1627 data->present = 0x00;
1630 if (hash256 && rand256) {
1631 memcpy(data->hash256, hash256, sizeof(data->hash256));
1632 memcpy(data->rand256, rand256, sizeof(data->rand256));
1633 } else {
1634 memset(data->hash256, 0, sizeof(data->hash256));
1635 memset(data->rand256, 0, sizeof(data->rand256));
1636 if (hash192 && rand192)
1637 data->present = 0x01;
1640 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1642 return 0;
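/*
 * Illustrative sketch, not part of hci_core.c: the "present" field written
 * by hci_add_remote_oob_data() above records which OOB pairs were supplied,
 * bit 0 for the P-192 hash/randomizer and bit 1 for the P-256 pair (so the
 * stored value is 0x01, 0x02 or 0x03).  A compact, hypothetical restatement
 * of that bookkeeping:
 */
static unsigned char example_oob_present(int have_p192, int have_p256)
{
	unsigned char present = 0x00;

	if (have_p192)
		present |= 0x01;	/* hash192/rand192 are valid */
	if (have_p256)
		present |= 0x02;	/* hash256/rand256 are valid */

	return present;
}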
1645 /* This function requires the caller holds hdev->lock */
1646 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1648 struct adv_info *adv_instance;
1650 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1651 if (adv_instance->instance == instance)
1652 return adv_instance;
1655 return NULL;
1658 /* This function requires the caller holds hdev->lock */
1659 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1661 struct adv_info *cur_instance;
1663 cur_instance = hci_find_adv_instance(hdev, instance);
1664 if (!cur_instance)
1665 return NULL;
1667 if (cur_instance == list_last_entry(&hdev->adv_instances,
1668 struct adv_info, list))
1669 return list_first_entry(&hdev->adv_instances,
1670 struct adv_info, list);
1671 else
1672 return list_next_entry(cur_instance, list);
1675 /* This function requires the caller holds hdev->lock */
1676 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1678 struct adv_info *adv_instance;
1680 adv_instance = hci_find_adv_instance(hdev, instance);
1681 if (!adv_instance)
1682 return -ENOENT;
1684 BT_DBG("%s removing %dMR", hdev->name, instance);
1686 if (hdev->cur_adv_instance == instance) {
1687 if (hdev->adv_instance_timeout) {
1688 cancel_delayed_work(&hdev->adv_instance_expire);
1689 hdev->adv_instance_timeout = 0;
1691 hdev->cur_adv_instance = 0x00;
1694 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1696 list_del(&adv_instance->list);
1697 kfree(adv_instance);
1699 hdev->adv_instance_cnt--;
1701 return 0;
1704 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1706 struct adv_info *adv_instance, *n;
1708 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1709 adv_instance->rpa_expired = rpa_expired;
1712 /* This function requires the caller holds hdev->lock */
1713 void hci_adv_instances_clear(struct hci_dev *hdev)
1715 struct adv_info *adv_instance, *n;
1717 if (hdev->adv_instance_timeout) {
1718 cancel_delayed_work(&hdev->adv_instance_expire);
1719 hdev->adv_instance_timeout = 0;
1722 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1723 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1724 list_del(&adv_instance->list);
1725 kfree(adv_instance);
1728 hdev->adv_instance_cnt = 0;
1729 hdev->cur_adv_instance = 0x00;
1732 static void adv_instance_rpa_expired(struct work_struct *work)
1734 struct adv_info *adv_instance = container_of(work, struct adv_info,
1735 rpa_expired_cb.work);
1737 BT_DBG("");
1739 adv_instance->rpa_expired = true;
1742 /* This function requires the caller holds hdev->lock */
1743 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1744 u32 flags, u16 adv_data_len, u8 *adv_data,
1745 u16 scan_rsp_len, u8 *scan_rsp_data,
1746 u16 timeout, u16 duration, s8 tx_power,
1747 u32 min_interval, u32 max_interval,
1748 u8 mesh_handle)
1750 struct adv_info *adv;
1752 adv = hci_find_adv_instance(hdev, instance);
1753 if (adv) {
1754 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1755 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1756 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1757 } else {
1758 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1759 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1760 return ERR_PTR(-EOVERFLOW);
1762 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1763 if (!adv)
1764 return ERR_PTR(-ENOMEM);
1766 adv->pending = true;
1767 adv->instance = instance;
1769 /* If the controller supports only one set and the instance is set to
1770 * 1 then there is no option other than using handle 0x00.
1772 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1773 adv->handle = 0x00;
1774 else
1775 adv->handle = instance;
1777 list_add(&adv->list, &hdev->adv_instances);
1778 hdev->adv_instance_cnt++;
1781 adv->flags = flags;
1782 adv->min_interval = min_interval;
1783 adv->max_interval = max_interval;
1784 adv->tx_power = tx_power;
1785 /* Defining a mesh_handle changes the timing units to ms,
1786 * rather than seconds, and ties the instance to the requested
1787 * mesh_tx queue.
1789 adv->mesh = mesh_handle;
1791 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1792 scan_rsp_len, scan_rsp_data);
1794 adv->timeout = timeout;
1795 adv->remaining_time = timeout;
1797 if (duration == 0)
1798 adv->duration = hdev->def_multi_adv_rotation_duration;
1799 else
1800 adv->duration = duration;
1802 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1804 BT_DBG("%s for %dMR", hdev->name, instance);
1806 return adv;
1809 /* This function requires the caller holds hdev->lock */
1810 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1811 u32 flags, u8 data_len, u8 *data,
1812 u32 min_interval, u32 max_interval)
1814 struct adv_info *adv;
1816 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1817 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1818 min_interval, max_interval, 0);
1819 if (IS_ERR(adv))
1820 return adv;
1822 adv->periodic = true;
1823 adv->per_adv_data_len = data_len;
1825 if (data)
1826 memcpy(adv->per_adv_data, data, data_len);
1828 return adv;
1831 /* This function requires the caller holds hdev->lock */
1832 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1833 u16 adv_data_len, u8 *adv_data,
1834 u16 scan_rsp_len, u8 *scan_rsp_data)
1836 struct adv_info *adv;
1838 adv = hci_find_adv_instance(hdev, instance);
1840 /* If advertisement doesn't exist, we can't modify its data */
1841 if (!adv)
1842 return -ENOENT;
1844 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1845 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1846 memcpy(adv->adv_data, adv_data, adv_data_len);
1847 adv->adv_data_len = adv_data_len;
1848 adv->adv_data_changed = true;
1851 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1852 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1853 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1854 adv->scan_rsp_len = scan_rsp_len;
1855 adv->scan_rsp_changed = true;
1858 /* Mark as changed if there are flags which would affect it */
1859 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1860 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1861 adv->scan_rsp_changed = true;
1863 return 0;
1866 /* This function requires the caller holds hdev->lock */
1867 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1869 u32 flags;
1870 struct adv_info *adv;
1872 if (instance == 0x00) {
1873 /* Instance 0 always manages the "Tx Power" and "Flags"
1874 * fields
1876 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1878 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1879 * corresponds to the "connectable" instance flag.
1881 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1882 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1884 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1885 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1886 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1887 flags |= MGMT_ADV_FLAG_DISCOV;
1889 return flags;
1892 adv = hci_find_adv_instance(hdev, instance);
1894 /* Return 0 when we got an invalid instance identifier. */
1895 if (!adv)
1896 return 0;
1898 return adv->flags;
1901 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1903 struct adv_info *adv;
1905 /* Instance 0x00 always set local name */
1906 if (instance == 0x00)
1907 return true;
1909 adv = hci_find_adv_instance(hdev, instance);
1910 if (!adv)
1911 return false;
1913 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1914 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1915 return true;
1917 return adv->scan_rsp_len ? true : false;
1920 /* This function requires the caller holds hdev->lock */
1921 void hci_adv_monitors_clear(struct hci_dev *hdev)
1923 struct adv_monitor *monitor;
1924 int handle;
1926 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1927 hci_free_adv_monitor(hdev, monitor);
1929 idr_destroy(&hdev->adv_monitors_idr);
1932 /* Frees the monitor structure and does some bookkeeping.
1933 * This function requires the caller holds hdev->lock.
1935 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1937 struct adv_pattern *pattern;
1938 struct adv_pattern *tmp;
1940 if (!monitor)
1941 return;
1943 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1944 list_del(&pattern->list);
1945 kfree(pattern);
1948 if (monitor->handle)
1949 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1951 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1952 hdev->adv_monitors_cnt--;
1953 mgmt_adv_monitor_removed(hdev, monitor->handle);
1956 kfree(monitor);
1959 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1960 * also attempts to forward the request to the controller.
1961 * This function requires the caller holds hci_req_sync_lock.
1963 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1965 int min, max, handle;
1966 int status = 0;
1968 if (!monitor)
1969 return -EINVAL;
1971 hci_dev_lock(hdev);
1973 min = HCI_MIN_ADV_MONITOR_HANDLE;
1974 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1975 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1976 GFP_KERNEL);
1978 hci_dev_unlock(hdev);
1980 if (handle < 0)
1981 return handle;
1983 monitor->handle = handle;
1985 if (!hdev_is_powered(hdev))
1986 return status;
1988 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1989 case HCI_ADV_MONITOR_EXT_NONE:
1990 bt_dev_dbg(hdev, "add monitor %d status %d",
1991 monitor->handle, status);
1992 /* Message was not forwarded to controller - not an error */
1993 break;
1995 case HCI_ADV_MONITOR_EXT_MSFT:
1996 status = msft_add_monitor_pattern(hdev, monitor);
1997 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1998 handle, status);
1999 break;
2002 return status;
2005 /* Attempts to tell the controller and free the monitor. If somehow the
2006 * controller doesn't have a corresponding handle, remove anyway.
2007 * This function requires the caller holds hci_req_sync_lock.
2009 static int hci_remove_adv_monitor(struct hci_dev *hdev,
2010 struct adv_monitor *monitor)
2012 int status = 0;
2013 int handle;
2015 switch (hci_get_adv_monitor_offload_ext(hdev)) {
2016 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
2017 bt_dev_dbg(hdev, "remove monitor %d status %d",
2018 monitor->handle, status);
2019 goto free_monitor;
2021 case HCI_ADV_MONITOR_EXT_MSFT:
2022 handle = monitor->handle;
2023 status = msft_remove_monitor(hdev, monitor);
2024 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
2025 handle, status);
2026 break;
2029 /* In case no matching handle registered, just free the monitor */
2030 if (status == -ENOENT)
2031 goto free_monitor;
2033 return status;
2035 free_monitor:
2036 if (status == -ENOENT)
2037 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2038 monitor->handle);
2039 hci_free_adv_monitor(hdev, monitor);
2041 return status;
2044 /* This function requires the caller holds hci_req_sync_lock */
2045 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2047 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2049 if (!monitor)
2050 return -EINVAL;
2052 return hci_remove_adv_monitor(hdev, monitor);
2055 /* This function requires the caller holds hci_req_sync_lock */
2056 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2058 struct adv_monitor *monitor;
2059 int idr_next_id = 0;
2060 int status = 0;
2062 while (1) {
2063 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2064 if (!monitor)
2065 break;
2067 status = hci_remove_adv_monitor(hdev, monitor);
2068 if (status)
2069 return status;
2071 idr_next_id++;
2074 return status;
2077 /* This function requires the caller holds hdev->lock */
2078 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2080 return !idr_is_empty(&hdev->adv_monitors_idr);
2083 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2085 if (msft_monitor_supported(hdev))
2086 return HCI_ADV_MONITOR_EXT_MSFT;
2088 return HCI_ADV_MONITOR_EXT_NONE;
2091 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2092 bdaddr_t *bdaddr, u8 type)
2094 struct bdaddr_list *b;
2096 list_for_each_entry(b, bdaddr_list, list) {
2097 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2098 return b;
2101 return NULL;
2104 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2105 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2106 u8 type)
2108 struct bdaddr_list_with_irk *b;
2110 list_for_each_entry(b, bdaddr_list, list) {
2111 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2112 return b;
2115 return NULL;
2118 struct bdaddr_list_with_flags *
2119 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2120 bdaddr_t *bdaddr, u8 type)
2122 struct bdaddr_list_with_flags *b;
2124 list_for_each_entry(b, bdaddr_list, list) {
2125 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2126 return b;
2129 return NULL;
2132 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2134 struct bdaddr_list *b, *n;
2136 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2137 list_del(&b->list);
2138 kfree(b);
2142 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2144 struct bdaddr_list *entry;
2146 if (!bacmp(bdaddr, BDADDR_ANY))
2147 return -EBADF;
2149 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2150 return -EEXIST;
2152 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2153 if (!entry)
2154 return -ENOMEM;
2156 bacpy(&entry->bdaddr, bdaddr);
2157 entry->bdaddr_type = type;
2159 list_add(&entry->list, list);
2161 return 0;
2164 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2165 u8 type, u8 *peer_irk, u8 *local_irk)
2167 struct bdaddr_list_with_irk *entry;
2169 if (!bacmp(bdaddr, BDADDR_ANY))
2170 return -EBADF;
2172 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2173 return -EEXIST;
2175 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2176 if (!entry)
2177 return -ENOMEM;
2179 bacpy(&entry->bdaddr, bdaddr);
2180 entry->bdaddr_type = type;
2182 if (peer_irk)
2183 memcpy(entry->peer_irk, peer_irk, 16);
2185 if (local_irk)
2186 memcpy(entry->local_irk, local_irk, 16);
2188 list_add(&entry->list, list);
2190 return 0;
2193 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2194 u8 type, u32 flags)
2196 struct bdaddr_list_with_flags *entry;
2198 if (!bacmp(bdaddr, BDADDR_ANY))
2199 return -EBADF;
2201 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2202 return -EEXIST;
2204 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2205 if (!entry)
2206 return -ENOMEM;
2208 bacpy(&entry->bdaddr, bdaddr);
2209 entry->bdaddr_type = type;
2210 entry->flags = flags;
2212 list_add(&entry->list, list);
2214 return 0;
2217 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2219 struct bdaddr_list *entry;
2221 if (!bacmp(bdaddr, BDADDR_ANY)) {
2222 hci_bdaddr_list_clear(list);
2223 return 0;
2226 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2227 if (!entry)
2228 return -ENOENT;
2230 list_del(&entry->list);
2231 kfree(entry);
2233 return 0;
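/*
 * Illustrative sketch, not part of hci_core.c: expected return values of
 * the bdaddr list helpers above for an address "ba" of type BDADDR_BREDR
 * that is not yet on the list.  example_bdaddr_list_usage() is a
 * hypothetical caller, added only to show the -EEXIST/-ENOENT behaviour.
 */
static void example_bdaddr_list_usage(struct list_head *list, bdaddr_t *ba)
{
	int err;

	err = hci_bdaddr_list_add(list, ba, BDADDR_BREDR);	/* 0 */
	err = hci_bdaddr_list_add(list, ba, BDADDR_BREDR);	/* -EEXIST */
	err = hci_bdaddr_list_del(list, ba, BDADDR_BREDR);	/* 0 */
	err = hci_bdaddr_list_del(list, ba, BDADDR_BREDR);	/* -ENOENT */
	(void)err;
}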
2236 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2237 u8 type)
2239 struct bdaddr_list_with_irk *entry;
2241 if (!bacmp(bdaddr, BDADDR_ANY)) {
2242 hci_bdaddr_list_clear(list);
2243 return 0;
2246 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2247 if (!entry)
2248 return -ENOENT;
2250 list_del(&entry->list);
2251 kfree(entry);
2253 return 0;
2256 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2257 u8 type)
2259 struct bdaddr_list_with_flags *entry;
2261 if (!bacmp(bdaddr, BDADDR_ANY)) {
2262 hci_bdaddr_list_clear(list);
2263 return 0;
2266 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2267 if (!entry)
2268 return -ENOENT;
2270 list_del(&entry->list);
2271 kfree(entry);
2273 return 0;
2276 /* This function requires the caller holds hdev->lock */
2277 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2278 bdaddr_t *addr, u8 addr_type)
2280 struct hci_conn_params *params;
2282 list_for_each_entry(params, &hdev->le_conn_params, list) {
2283 if (bacmp(&params->addr, addr) == 0 &&
2284 params->addr_type == addr_type) {
2285 return params;
2289 return NULL;
2292 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2293 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2294 bdaddr_t *addr, u8 addr_type)
2296 struct hci_conn_params *param;
2298 rcu_read_lock();
2300 list_for_each_entry_rcu(param, list, action) {
2301 if (bacmp(&param->addr, addr) == 0 &&
2302 param->addr_type == addr_type) {
2303 rcu_read_unlock();
2304 return param;
2308 rcu_read_unlock();
2310 return NULL;
2313 /* This function requires the caller holds hdev->lock */
2314 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2316 if (list_empty(&param->action))
2317 return;
2319 list_del_rcu(&param->action);
2320 synchronize_rcu();
2321 INIT_LIST_HEAD(&param->action);
2324 /* This function requires the caller holds hdev->lock */
2325 void hci_pend_le_list_add(struct hci_conn_params *param,
2326 struct list_head *list)
2328 list_add_rcu(&param->action, list);
2331 /* This function requires the caller holds hdev->lock */
2332 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2333 bdaddr_t *addr, u8 addr_type)
2335 struct hci_conn_params *params;
2337 params = hci_conn_params_lookup(hdev, addr, addr_type);
2338 if (params)
2339 return params;
2341 params = kzalloc(sizeof(*params), GFP_KERNEL);
2342 if (!params) {
2343 bt_dev_err(hdev, "out of memory");
2344 return NULL;
2347 bacpy(&params->addr, addr);
2348 params->addr_type = addr_type;
2350 list_add(&params->list, &hdev->le_conn_params);
2351 INIT_LIST_HEAD(&params->action);
2353 params->conn_min_interval = hdev->le_conn_min_interval;
2354 params->conn_max_interval = hdev->le_conn_max_interval;
2355 params->conn_latency = hdev->le_conn_latency;
2356 params->supervision_timeout = hdev->le_supv_timeout;
2357 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2359 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2361 return params;
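/* This function requires the caller holds hdev->lock */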
2364 void hci_conn_params_free(struct hci_conn_params *params)
2366 hci_pend_le_list_del_init(params);
2368 if (params->conn) {
2369 hci_conn_drop(params->conn);
2370 hci_conn_put(params->conn);
2373 list_del(&params->list);
2374 kfree(params);
2377 /* This function requires the caller holds hdev->lock */
2378 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2380 struct hci_conn_params *params;
2382 params = hci_conn_params_lookup(hdev, addr, addr_type);
2383 if (!params)
2384 return;
2386 hci_conn_params_free(params);
2388 hci_update_passive_scan(hdev);
2390 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2393 /* This function requires the caller holds hdev->lock */
2394 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2396 struct hci_conn_params *params, *tmp;
2398 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2399 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2400 continue;
2402 /* If trying to establish a one-time connection to a disabled
2403 * device, leave the params but mark them for a single explicit connect.
2405 if (params->explicit_connect) {
2406 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2407 continue;
2410 hci_conn_params_free(params);
2413 BT_DBG("All LE disabled connection parameters were removed");
2416 /* This function requires the caller holds hdev->lock */
2417 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2419 struct hci_conn_params *params, *tmp;
2421 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2422 hci_conn_params_free(params);
2424 BT_DBG("All LE connection parameters were removed");
2427 /* Copy the Identity Address of the controller.
2429 * If the controller has a public BD_ADDR, then by default use that one.
2430 * If this is an LE-only controller without a public address, default to
2431 * the static random address.
2433 * For debugging purposes it is possible to force controllers with a
2434 * public address to use the static random address instead.
2436 * In case BR/EDR has been disabled on a dual-mode controller and
2437 * userspace has configured a static address, then that address
2438 * becomes the identity address instead of the public BR/EDR address.
2440 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2441 u8 *bdaddr_type)
2443 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2444 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2445 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2446 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2447 bacpy(bdaddr, &hdev->static_addr);
2448 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2449 } else {
2450 bacpy(bdaddr, &hdev->bdaddr);
2451 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2455 static void hci_clear_wake_reason(struct hci_dev *hdev)
2457 hci_dev_lock(hdev);
2459 hdev->wake_reason = 0;
2460 bacpy(&hdev->wake_addr, BDADDR_ANY);
2461 hdev->wake_addr_type = 0;
2463 hci_dev_unlock(hdev);
2466 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2467 void *data)
2469 struct hci_dev *hdev =
2470 container_of(nb, struct hci_dev, suspend_notifier);
2471 int ret = 0;
2473 /* Userspace has full control of this device. Do nothing. */
2474 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2475 return NOTIFY_DONE;
2477 /* To avoid a potential race with hci_unregister_dev. */
2478 hci_dev_hold(hdev);
2480 if (action == PM_SUSPEND_PREPARE)
2481 ret = hci_suspend_dev(hdev);
2482 else if (action == PM_POST_SUSPEND)
2483 ret = hci_resume_dev(hdev);
2485 if (ret)
2486 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2487 action, ret);
2489 hci_dev_put(hdev);
2490 return NOTIFY_DONE;
2493 /* Alloc HCI device */
2494 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2496 struct hci_dev *hdev;
2497 unsigned int alloc_size;
2499 alloc_size = sizeof(*hdev);
2500 if (sizeof_priv) {
2501 /* Fixme: May need ALIGN-ment? */
2502 alloc_size += sizeof_priv;
2505 hdev = kzalloc(alloc_size, GFP_KERNEL);
2506 if (!hdev)
2507 return NULL;
2509 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2510 hdev->esco_type = (ESCO_HV1);
2511 hdev->link_mode = (HCI_LM_ACCEPT);
2512 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2513 hdev->io_capability = 0x03; /* No Input No Output */
2514 hdev->manufacturer = 0xffff; /* Default to internal use */
2515 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2516 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2517 hdev->adv_instance_cnt = 0;
2518 hdev->cur_adv_instance = 0x00;
2519 hdev->adv_instance_timeout = 0;
2521 hdev->advmon_allowlist_duration = 300;
2522 hdev->advmon_no_filter_duration = 500;
2523 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2525 hdev->sniff_max_interval = 800;
2526 hdev->sniff_min_interval = 80;
2528 hdev->le_adv_channel_map = 0x07;
2529 hdev->le_adv_min_interval = 0x0800;
2530 hdev->le_adv_max_interval = 0x0800;
2531 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2532 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2533 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2534 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2535 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2536 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2537 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2538 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2539 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2540 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2541 hdev->le_conn_min_interval = 0x0018;
2542 hdev->le_conn_max_interval = 0x0028;
2543 hdev->le_conn_latency = 0x0000;
2544 hdev->le_supv_timeout = 0x002a;
2545 hdev->le_def_tx_len = 0x001b;
2546 hdev->le_def_tx_time = 0x0148;
2547 hdev->le_max_tx_len = 0x001b;
2548 hdev->le_max_tx_time = 0x0148;
2549 hdev->le_max_rx_len = 0x001b;
2550 hdev->le_max_rx_time = 0x0148;
2551 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2552 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2553 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2554 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2555 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2556 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2557 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2558 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2559 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2561 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2562 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2563 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2564 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2565 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2566 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2568 /* default 1.28 sec page scan */
2569 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2570 hdev->def_page_scan_int = 0x0800;
2571 hdev->def_page_scan_window = 0x0012;
2573 mutex_init(&hdev->lock);
2574 mutex_init(&hdev->req_lock);
2576 ida_init(&hdev->unset_handle_ida);
2578 INIT_LIST_HEAD(&hdev->mesh_pending);
2579 INIT_LIST_HEAD(&hdev->mgmt_pending);
2580 INIT_LIST_HEAD(&hdev->reject_list);
2581 INIT_LIST_HEAD(&hdev->accept_list);
2582 INIT_LIST_HEAD(&hdev->uuids);
2583 INIT_LIST_HEAD(&hdev->link_keys);
2584 INIT_LIST_HEAD(&hdev->long_term_keys);
2585 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2586 INIT_LIST_HEAD(&hdev->remote_oob_data);
2587 INIT_LIST_HEAD(&hdev->le_accept_list);
2588 INIT_LIST_HEAD(&hdev->le_resolv_list);
2589 INIT_LIST_HEAD(&hdev->le_conn_params);
2590 INIT_LIST_HEAD(&hdev->pend_le_conns);
2591 INIT_LIST_HEAD(&hdev->pend_le_reports);
2592 INIT_LIST_HEAD(&hdev->conn_hash.list);
2593 INIT_LIST_HEAD(&hdev->adv_instances);
2594 INIT_LIST_HEAD(&hdev->blocked_keys);
2595 INIT_LIST_HEAD(&hdev->monitored_devices);
2597 INIT_LIST_HEAD(&hdev->local_codecs);
2598 INIT_WORK(&hdev->rx_work, hci_rx_work);
2599 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2600 INIT_WORK(&hdev->tx_work, hci_tx_work);
2601 INIT_WORK(&hdev->power_on, hci_power_on);
2602 INIT_WORK(&hdev->error_reset, hci_error_reset);
2604 hci_cmd_sync_init(hdev);
2606 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2608 skb_queue_head_init(&hdev->rx_q);
2609 skb_queue_head_init(&hdev->cmd_q);
2610 skb_queue_head_init(&hdev->raw_q);
2612 init_waitqueue_head(&hdev->req_wait_q);
2614 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2615 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2617 hci_devcd_setup(hdev);
2618 hci_request_setup(hdev);
2620 hci_init_sysfs(hdev);
2621 discovery_init(hdev);
2623 return hdev;
2625 EXPORT_SYMBOL(hci_alloc_dev_priv);
2627 /* Free HCI device */
2628 void hci_free_dev(struct hci_dev *hdev)
2630 /* will free via device release */
2631 put_device(&hdev->dev);
2633 EXPORT_SYMBOL(hci_free_dev);
2635 /* Register HCI device */
2636 int hci_register_dev(struct hci_dev *hdev)
2638 int id, error;
2640 if (!hdev->open || !hdev->close || !hdev->send)
2641 return -EINVAL;
2643 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2644 if (id < 0)
2645 return id;
2647 error = dev_set_name(&hdev->dev, "hci%u", id);
2648 if (error) {
 /* don't leak the just-allocated index on failure */
 ida_free(&hci_index_ida, id);
2649 return error;
 }
2651 hdev->name = dev_name(&hdev->dev);
2652 hdev->id = id;
2654 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2656 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2657 if (!hdev->workqueue) {
2658 error = -ENOMEM;
2659 goto err;
2662 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2663 hdev->name);
2664 if (!hdev->req_workqueue) {
2665 destroy_workqueue(hdev->workqueue);
2666 error = -ENOMEM;
2667 goto err;
2670 if (!IS_ERR_OR_NULL(bt_debugfs))
2671 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2673 error = device_add(&hdev->dev);
2674 if (error < 0)
2675 goto err_wqueue;
2677 hci_leds_init(hdev);
2679 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2680 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2681 hdev);
2682 if (hdev->rfkill) {
2683 if (rfkill_register(hdev->rfkill) < 0) {
2684 rfkill_destroy(hdev->rfkill);
2685 hdev->rfkill = NULL;
2689 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2690 hci_dev_set_flag(hdev, HCI_RFKILLED);
2692 hci_dev_set_flag(hdev, HCI_SETUP);
2693 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2695 /* Assume BR/EDR support until proven otherwise (such as
2696 * through reading supported features during init).
2698 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2700 write_lock(&hci_dev_list_lock);
2701 list_add(&hdev->list, &hci_dev_list);
2702 write_unlock(&hci_dev_list_lock);
2704 /* Devices that are marked for raw-only usage are unconfigured
2705 * and should not be included in normal operation.
2707 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2708 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2710 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2711 * callback.
2713 if (hdev->wakeup)
2714 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2716 hci_sock_dev_event(hdev, HCI_DEV_REG);
2717 hci_dev_hold(hdev);
2719 error = hci_register_suspend_notifier(hdev);
2720 if (error)
2721 BT_WARN("register suspend notifier failed error:%d\n", error);
2723 queue_work(hdev->req_workqueue, &hdev->power_on);
2725 idr_init(&hdev->adv_monitors_idr);
2726 msft_register(hdev);
2728 return id;
2730 err_wqueue:
2731 debugfs_remove_recursive(hdev->debugfs);
2732 destroy_workqueue(hdev->workqueue);
2733 destroy_workqueue(hdev->req_workqueue);
2734 err:
2735 ida_free(&hci_index_ida, hdev->id);
2737 return error;
2739 EXPORT_SYMBOL(hci_register_dev);
2741 /* Unregister HCI device */
2742 void hci_unregister_dev(struct hci_dev *hdev)
2744 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2746 mutex_lock(&hdev->unregister_lock);
2747 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2748 mutex_unlock(&hdev->unregister_lock);
2750 write_lock(&hci_dev_list_lock);
2751 list_del(&hdev->list);
2752 write_unlock(&hci_dev_list_lock);
2754 cancel_work_sync(&hdev->power_on);
2756 hci_cmd_sync_clear(hdev);
2758 hci_unregister_suspend_notifier(hdev);
2760 hci_dev_do_close(hdev);
2762 if (!test_bit(HCI_INIT, &hdev->flags) &&
2763 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2764 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2765 hci_dev_lock(hdev);
2766 mgmt_index_removed(hdev);
2767 hci_dev_unlock(hdev);
2770 /* mgmt_index_removed should take care of emptying the
2771 * pending list */
2772 BUG_ON(!list_empty(&hdev->mgmt_pending));
2774 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2776 if (hdev->rfkill) {
2777 rfkill_unregister(hdev->rfkill);
2778 rfkill_destroy(hdev->rfkill);
2781 device_del(&hdev->dev);
2782 /* Actual cleanup is deferred until hci_release_dev(). */
2783 hci_dev_put(hdev);
2785 EXPORT_SYMBOL(hci_unregister_dev);
2787 /* Release HCI device */
2788 void hci_release_dev(struct hci_dev *hdev)
2790 debugfs_remove_recursive(hdev->debugfs);
2791 kfree_const(hdev->hw_info);
2792 kfree_const(hdev->fw_info);
2794 destroy_workqueue(hdev->workqueue);
2795 destroy_workqueue(hdev->req_workqueue);
2797 hci_dev_lock(hdev);
2798 hci_bdaddr_list_clear(&hdev->reject_list);
2799 hci_bdaddr_list_clear(&hdev->accept_list);
2800 hci_uuids_clear(hdev);
2801 hci_link_keys_clear(hdev);
2802 hci_smp_ltks_clear(hdev);
2803 hci_smp_irks_clear(hdev);
2804 hci_remote_oob_data_clear(hdev);
2805 hci_adv_instances_clear(hdev);
2806 hci_adv_monitors_clear(hdev);
2807 hci_bdaddr_list_clear(&hdev->le_accept_list);
2808 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2809 hci_conn_params_clear_all(hdev);
2810 hci_discovery_filter_clear(hdev);
2811 hci_blocked_keys_clear(hdev);
2812 hci_codec_list_clear(&hdev->local_codecs);
2813 msft_release(hdev);
2814 hci_dev_unlock(hdev);
2816 ida_destroy(&hdev->unset_handle_ida);
2817 ida_free(&hci_index_ida, hdev->id);
2818 kfree_skb(hdev->sent_cmd);
2819 kfree_skb(hdev->req_skb);
2820 kfree_skb(hdev->recv_event);
2821 kfree(hdev);
2823 EXPORT_SYMBOL(hci_release_dev);
2825 int hci_register_suspend_notifier(struct hci_dev *hdev)
2827 int ret = 0;
2829 if (!hdev->suspend_notifier.notifier_call &&
2830 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2831 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2832 ret = register_pm_notifier(&hdev->suspend_notifier);
2835 return ret;
2838 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2840 int ret = 0;
2842 if (hdev->suspend_notifier.notifier_call) {
2843 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2844 if (!ret)
2845 hdev->suspend_notifier.notifier_call = NULL;
2848 return ret;
2851 /* Cancel ongoing command synchronously:
2853 * - Cancel command timer
2854 * - Reset command counter
2855 * - Cancel command request
2857 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2859 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2861 cancel_delayed_work_sync(&hdev->cmd_timer);
2862 cancel_delayed_work_sync(&hdev->ncmd_timer);
2863 atomic_set(&hdev->cmd_cnt, 1);
2865 hci_cmd_sync_cancel_sync(hdev, err);
2868 /* Suspend HCI device */
2869 int hci_suspend_dev(struct hci_dev *hdev)
2871 int ret;
2873 bt_dev_dbg(hdev, "");
2875 /* Suspend should only act when the device is powered. */
2876 if (!hdev_is_powered(hdev) ||
2877 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2878 return 0;
2880 /* If powering down don't attempt to suspend */
2881 if (mgmt_powering_down(hdev))
2882 return 0;
2884 /* Cancel potentially blocking sync operation before suspend */
2885 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2887 hci_req_sync_lock(hdev);
2888 ret = hci_suspend_sync(hdev);
2889 hci_req_sync_unlock(hdev);
2891 hci_clear_wake_reason(hdev);
2892 mgmt_suspending(hdev, hdev->suspend_state);
2894 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2895 return ret;
2897 EXPORT_SYMBOL(hci_suspend_dev);
2899 /* Resume HCI device */
2900 int hci_resume_dev(struct hci_dev *hdev)
2902 int ret;
2904 bt_dev_dbg(hdev, "");
2906 /* Resume should only act when the device is powered. */
2907 if (!hdev_is_powered(hdev) ||
2908 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2909 return 0;
2911 /* If powering down don't attempt to resume */
2912 if (mgmt_powering_down(hdev))
2913 return 0;
2915 hci_req_sync_lock(hdev);
2916 ret = hci_resume_sync(hdev);
2917 hci_req_sync_unlock(hdev);
2919 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2920 hdev->wake_addr_type);
2922 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2923 return ret;
2925 EXPORT_SYMBOL(hci_resume_dev);
2927 /* Reset HCI device */
2928 int hci_reset_dev(struct hci_dev *hdev)
2930 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2931 struct sk_buff *skb;
2933 skb = bt_skb_alloc(3, GFP_ATOMIC);
2934 if (!skb)
2935 return -ENOMEM;
2937 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2938 skb_put_data(skb, hw_err, 3);
2940 bt_dev_err(hdev, "Injecting HCI hardware error event");
2942 /* Send Hardware Error to upper stack */
2943 return hci_recv_frame(hdev, skb);
2945 EXPORT_SYMBOL(hci_reset_dev);
2947 /* Receive frame from HCI drivers */
2948 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2950 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2951 && !test_bit(HCI_INIT, &hdev->flags))) {
2952 kfree_skb(skb);
2953 return -ENXIO;
2956 switch (hci_skb_pkt_type(skb)) {
2957 case HCI_EVENT_PKT:
2958 break;
2959 case HCI_ACLDATA_PKT:
2960 /* Detect if ISO packet has been sent as ACL */
2961 if (hci_conn_num(hdev, ISO_LINK)) {
2962 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2963 __u8 type;
2965 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2966 if (type == ISO_LINK)
2967 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2969 break;
2970 case HCI_SCODATA_PKT:
2971 break;
2972 case HCI_ISODATA_PKT:
2973 break;
2974 default:
2975 kfree_skb(skb);
2976 return -EINVAL;
2979 /* Incoming skb */
2980 bt_cb(skb)->incoming = 1;
2982 /* Time stamp */
2983 __net_timestamp(skb);
2985 skb_queue_tail(&hdev->rx_q, skb);
2986 queue_work(hdev->workqueue, &hdev->rx_work);
2988 return 0;
2990 EXPORT_SYMBOL(hci_recv_frame);
2992 /* Receive diagnostic message from HCI drivers */
2993 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2995 /* Mark as diagnostic packet */
2996 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2998 /* Time stamp */
2999 __net_timestamp(skb);
3001 skb_queue_tail(&hdev->rx_q, skb);
3002 queue_work(hdev->workqueue, &hdev->rx_work);
3004 return 0;
3006 EXPORT_SYMBOL(hci_recv_diag);
3008 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3010 va_list vargs;
3012 va_start(vargs, fmt);
3013 kfree_const(hdev->hw_info);
3014 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3015 va_end(vargs);
3017 EXPORT_SYMBOL(hci_set_hw_info);
3019 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3021 va_list vargs;
3023 va_start(vargs, fmt);
3024 kfree_const(hdev->fw_info);
3025 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3026 va_end(vargs);
3028 EXPORT_SYMBOL(hci_set_fw_info);
3030 /* ---- Interface to upper protocols ---- */
3032 int hci_register_cb(struct hci_cb *cb)
3034 BT_DBG("%p name %s", cb, cb->name);
3036 mutex_lock(&hci_cb_list_lock);
3037 list_add_tail(&cb->list, &hci_cb_list);
3038 mutex_unlock(&hci_cb_list_lock);
3040 return 0;
3042 EXPORT_SYMBOL(hci_register_cb);
3044 int hci_unregister_cb(struct hci_cb *cb)
3046 BT_DBG("%p name %s", cb, cb->name);
3048 mutex_lock(&hci_cb_list_lock);
3049 list_del(&cb->list);
3050 mutex_unlock(&hci_cb_list_lock);
3052 return 0;
3054 EXPORT_SYMBOL(hci_unregister_cb);
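/* Illustrative sketch of how an upper-layer protocol uses this interface
 * (not part of this file; l2cap_core.c is a real user along these lines):
 *
 *	static struct hci_cb l2cap_cb = {
 *		.name		= "L2CAP",
 *		.connect_cfm	= l2cap_connect_cfm,
 *		.disconn_cfm	= l2cap_disconn_cfm,
 *	};
 *
 * hci_register_cb(&l2cap_cb) is called once at module init and
 * hci_unregister_cb(&l2cap_cb) at module exit.
 */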
3056 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3058 int err;
3060 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3061 skb->len);
3063 /* Time stamp */
3064 __net_timestamp(skb);
3066 /* Send copy to monitor */
3067 hci_send_to_monitor(hdev, skb);
3069 if (atomic_read(&hdev->promisc)) {
3070 /* Send copy to the sockets */
3071 hci_send_to_sock(hdev, skb);
3074 /* Get rid of skb owner, prior to sending to the driver. */
3075 skb_orphan(skb);
3077 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3078 kfree_skb(skb);
3079 return -EINVAL;
3082 err = hdev->send(hdev, skb);
3083 if (err < 0) {
3084 bt_dev_err(hdev, "sending frame failed (%d)", err);
3085 kfree_skb(skb);
3086 return err;
3089 return 0;
3092 /* Send HCI command */
3093 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3094 const void *param)
3096 struct sk_buff *skb;
3098 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3100 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3101 if (!skb) {
3102 bt_dev_err(hdev, "no memory for command");
3103 return -ENOMEM;
3106 /* Stand-alone HCI commands must be flagged as
3107 * single-command requests.
3109 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3111 skb_queue_tail(&hdev->cmd_q, skb);
3112 queue_work(hdev->workqueue, &hdev->cmd_work);
3114 return 0;
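/* Illustrative use, assuming a valid hdev (a minimal sketch, not taken
 * from this function's callers): queue a one-byte Write Scan Enable
 * command; the result arrives asynchronously as a Command Complete event:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 */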
3117 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3118 const void *param)
3120 struct sk_buff *skb;
3122 if (hci_opcode_ogf(opcode) != 0x3f) {
3123 /* A controller receiving a command shall respond with either
3124 * a Command Status Event or a Command Complete Event.
3125 * Therefore, all standard HCI commands must be sent via the
3126 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3127 * Some vendors do not comply with this rule for vendor-specific
3128 * commands and do not return any event. We want to support
3129 * unresponded commands for such cases only.
3131 bt_dev_err(hdev, "unresponded command not supported");
3132 return -EINVAL;
3135 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3136 if (!skb) {
3137 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3138 opcode);
3139 return -ENOMEM;
3142 hci_send_frame(hdev, skb);
3144 return 0;
3146 EXPORT_SYMBOL(__hci_cmd_send);
3148 /* Get data from the previously sent command */
3149 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3151 struct hci_command_hdr *hdr;
3153 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3154 return NULL;
3156 hdr = (void *)skb->data;
3158 if (hdr->opcode != cpu_to_le16(opcode))
3159 return NULL;
3161 return skb->data + HCI_COMMAND_HDR_SIZE;
3164 /* Get data from the previously sent command */
3165 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3167 void *data;
3169 /* Check if opcode matches last sent command */
3170 data = hci_cmd_data(hdev->sent_cmd, opcode);
3171 if (!data)
3172 /* Check if opcode matches last request */
3173 data = hci_cmd_data(hdev->req_skb, opcode);
3175 return data;
3178 /* Get data from last received event */
3179 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3181 struct hci_event_hdr *hdr;
3182 int offset;
3184 if (!hdev->recv_event)
3185 return NULL;
3187 hdr = (void *)hdev->recv_event->data;
3188 offset = sizeof(*hdr);
3190 if (hdr->evt != event) {
3191 /* In case of an LE meta event, check whether the subevent matches */
3192 if (hdr->evt == HCI_EV_LE_META) {
3193 struct hci_ev_le_meta *ev;
3195 ev = (void *)hdev->recv_event->data + offset;
3196 offset += sizeof(*ev);
3197 if (ev->subevent == event)
3198 goto found;
3200 return NULL;
3203 found:
3204 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3206 return hdev->recv_event->data + offset;
3209 /* Send ACL data */
3210 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3212 struct hci_acl_hdr *hdr;
3213 int len = skb->len;
3215 skb_push(skb, HCI_ACL_HDR_SIZE);
3216 skb_reset_transport_header(skb);
3217 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3218 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3219 hdr->dlen = cpu_to_le16(len);
3222 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3223 struct sk_buff *skb, __u16 flags)
3225 struct hci_conn *conn = chan->conn;
3226 struct hci_dev *hdev = conn->hdev;
3227 struct sk_buff *list;
3229 skb->len = skb_headlen(skb);
3230 skb->data_len = 0;
3232 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3234 hci_add_acl_hdr(skb, conn->handle, flags);
3236 list = skb_shinfo(skb)->frag_list;
3237 if (!list) {
3238 /* Non fragmented */
3239 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3241 skb_queue_tail(queue, skb);
3242 } else {
3243 /* Fragmented */
3244 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3246 skb_shinfo(skb)->frag_list = NULL;
3248 /* Queue all fragments atomically. We need to use spin_lock_bh
3249 * here because of 6LoWPAN links, as there this function is
3250 * called from softirq and using normal spin lock could cause
3251 * deadlocks.
3253 spin_lock_bh(&queue->lock);
3255 __skb_queue_tail(queue, skb);
3257 flags &= ~ACL_START;
3258 flags |= ACL_CONT;
3259 do {
3260 skb = list; list = list->next;
3262 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3263 hci_add_acl_hdr(skb, conn->handle, flags);
3265 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3267 __skb_queue_tail(queue, skb);
3268 } while (list);
3270 spin_unlock_bh(&queue->lock);
3274 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3276 struct hci_dev *hdev = chan->conn->hdev;
3278 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3280 hci_queue_acl(chan, &chan->data_q, skb, flags);
3282 queue_work(hdev->workqueue, &hdev->tx_work);
3285 /* Send SCO data */
3286 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3288 struct hci_dev *hdev = conn->hdev;
3289 struct hci_sco_hdr hdr;
3291 BT_DBG("%s len %d", hdev->name, skb->len);
3293 hdr.handle = cpu_to_le16(conn->handle);
3294 hdr.dlen = skb->len;
3296 skb_push(skb, HCI_SCO_HDR_SIZE);
3297 skb_reset_transport_header(skb);
3298 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3300 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3302 skb_queue_tail(&conn->data_q, skb);
3303 queue_work(hdev->workqueue, &hdev->tx_work);
3306 /* Send ISO data */
3307 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3309 struct hci_iso_hdr *hdr;
3310 int len = skb->len;
3312 skb_push(skb, HCI_ISO_HDR_SIZE);
3313 skb_reset_transport_header(skb);
3314 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3315 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3316 hdr->dlen = cpu_to_le16(len);
3319 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3320 struct sk_buff *skb)
3322 struct hci_dev *hdev = conn->hdev;
3323 struct sk_buff *list;
3324 __u16 flags;
3326 skb->len = skb_headlen(skb);
3327 skb->data_len = 0;
3329 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3331 list = skb_shinfo(skb)->frag_list;
3333 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3334 hci_add_iso_hdr(skb, conn->handle, flags);
3336 if (!list) {
3337 /* Non fragmented */
3338 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3340 skb_queue_tail(queue, skb);
3341 } else {
3342 /* Fragmented */
3343 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3345 skb_shinfo(skb)->frag_list = NULL;
3347 __skb_queue_tail(queue, skb);
3349 do {
3350 skb = list; list = list->next;
3352 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3353 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3354 0x00);
3355 hci_add_iso_hdr(skb, conn->handle, flags);
3357 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3359 __skb_queue_tail(queue, skb);
3360 } while (list);
3364 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3366 struct hci_dev *hdev = conn->hdev;
3368 BT_DBG("%s len %d", hdev->name, skb->len);
3370 hci_queue_iso(conn, &conn->data_q, skb);
3372 queue_work(hdev->workqueue, &hdev->tx_work);
3375 /* ---- HCI TX task (outgoing data) ---- */
3377 /* HCI Connection scheduler */
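/* Work out how many packets @conn may send in this scheduling round:
 * the controller's free buffer count for the connection's link type,
 * divided by the number of connections competing for it (minimum 1).
 */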
3378 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3380 struct hci_dev *hdev;
3381 int cnt, q;
3383 if (!conn) {
3384 *quote = 0;
3385 return;
3388 hdev = conn->hdev;
3390 switch (conn->type) {
3391 case ACL_LINK:
3392 cnt = hdev->acl_cnt;
3393 break;
3394 case SCO_LINK:
3395 case ESCO_LINK:
3396 cnt = hdev->sco_cnt;
3397 break;
3398 case LE_LINK:
3399 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3400 break;
3401 case ISO_LINK:
3402 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3403 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3404 break;
3405 default:
3406 cnt = 0;
3407 bt_dev_err(hdev, "unknown link type %d", conn->type);
3410 q = cnt / num;
3411 *quote = q ? q : 1;
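/* Connection-level scheduler: pick the connection of @type that has
 * queued data and the fewest packets already in flight; *quote receives
 * its share of the controller's buffers for this round.
 */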
3414 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3415 int *quote)
3417 struct hci_conn_hash *h = &hdev->conn_hash;
3418 struct hci_conn *conn = NULL, *c;
3419 unsigned int num = 0, min = ~0;
3421 /* We don't have to lock the device here. Connections are always
3422 * added and removed with the TX task disabled. */
3424 rcu_read_lock();
3426 list_for_each_entry_rcu(c, &h->list, list) {
3427 if (c->type != type || skb_queue_empty(&c->data_q))
3428 continue;
3430 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3431 continue;
3433 num++;
3435 if (c->sent < min) {
3436 min = c->sent;
3437 conn = c;
3440 if (hci_conn_num(hdev, type) == num)
3441 break;
3444 rcu_read_unlock();
3446 hci_quote_sent(conn, num, quote);
3448 BT_DBG("conn %p quote %d", conn, *quote);
3449 return conn;
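/* Link TX timeout handling: the controller stopped acknowledging packets,
 * so disconnect every connection of @type that still has data in flight.
 */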
3452 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3454 struct hci_conn_hash *h = &hdev->conn_hash;
3455 struct hci_conn *c;
3457 bt_dev_err(hdev, "link tx timeout");
3459 rcu_read_lock();
3461 /* Kill stalled connections */
3462 list_for_each_entry_rcu(c, &h->list, list) {
3463 if (c->type == type && c->sent) {
3464 bt_dev_err(hdev, "killing stalled connection %pMR",
3465 &c->dst);
3466 /* hci_disconnect might sleep, so, we have to release
3467 * the RCU read lock before calling it.
3469 rcu_read_unlock();
3470 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3471 rcu_read_lock();
3475 rcu_read_unlock();
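/* Channel-level scheduler: among connections of @type, pick the channel
 * whose head-of-queue skb has the highest priority; ties are broken in
 * favour of the connection with the fewest packets in flight.
 */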
3478 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3479 int *quote)
3481 struct hci_conn_hash *h = &hdev->conn_hash;
3482 struct hci_chan *chan = NULL;
3483 unsigned int num = 0, min = ~0, cur_prio = 0;
3484 struct hci_conn *conn;
3485 int conn_num = 0;
3487 BT_DBG("%s", hdev->name);
3489 rcu_read_lock();
3491 list_for_each_entry_rcu(conn, &h->list, list) {
3492 struct hci_chan *tmp;
3494 if (conn->type != type)
3495 continue;
3497 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3498 continue;
3500 conn_num++;
3502 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3503 struct sk_buff *skb;
3505 if (skb_queue_empty(&tmp->data_q))
3506 continue;
3508 skb = skb_peek(&tmp->data_q);
3509 if (skb->priority < cur_prio)
3510 continue;
3512 if (skb->priority > cur_prio) {
3513 num = 0;
3514 min = ~0;
3515 cur_prio = skb->priority;
3518 num++;
3520 if (conn->sent < min) {
3521 min = conn->sent;
3522 chan = tmp;
3526 if (hci_conn_num(hdev, type) == conn_num)
3527 break;
3530 rcu_read_unlock();
3532 if (!chan)
3533 return NULL;
3535 hci_quote_sent(chan->conn, num, quote);
3537 BT_DBG("chan %p quote %d", chan, *quote);
3538 return chan;
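/* Priority ageing: reset the sent counter of every channel that did
 * transmit and bump the head-of-queue skb of channels that sent nothing
 * to HCI_PRIO_MAX - 1, so low-priority traffic is not starved forever.
 */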
3541 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3543 struct hci_conn_hash *h = &hdev->conn_hash;
3544 struct hci_conn *conn;
3545 int num = 0;
3547 BT_DBG("%s", hdev->name);
3549 rcu_read_lock();
3551 list_for_each_entry_rcu(conn, &h->list, list) {
3552 struct hci_chan *chan;
3554 if (conn->type != type)
3555 continue;
3557 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3558 continue;
3560 num++;
3562 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3563 struct sk_buff *skb;
3565 if (chan->sent) {
3566 chan->sent = 0;
3567 continue;
3570 if (skb_queue_empty(&chan->data_q))
3571 continue;
3573 skb = skb_peek(&chan->data_q);
3574 if (skb->priority >= HCI_PRIO_MAX - 1)
3575 continue;
3577 skb->priority = HCI_PRIO_MAX - 1;
3579 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3580 skb->priority);
3583 if (hci_conn_num(hdev, type) == num)
3584 break;
3587 rcu_read_unlock();
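/* Watchdog for the schedulers: if the controller has returned no buffer
 * credits and the last transmission was more than HCI_ACL_TX_TIMEOUT ago,
 * treat the link as stalled and kill the affected connections.
 */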
3591 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3593 unsigned long last_tx;
3595 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3596 return;
3598 switch (type) {
3599 case LE_LINK:
3600 last_tx = hdev->le_last_tx;
3601 break;
3602 default:
3603 last_tx = hdev->acl_last_tx;
3604 break;
3607 /* tx timeout must be longer than maximum link supervision timeout
3608 * (40.9 seconds)
3610 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3611 hci_link_tx_to(hdev, type);
3614 /* Schedule SCO */
3615 static void hci_sched_sco(struct hci_dev *hdev)
3617 struct hci_conn *conn;
3618 struct sk_buff *skb;
3619 int quote;
3621 BT_DBG("%s", hdev->name);
3623 if (!hci_conn_num(hdev, SCO_LINK))
3624 return;
3626 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3627 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3628 BT_DBG("skb %p len %d", skb, skb->len);
3629 hci_send_frame(hdev, skb);
3631 conn->sent++;
3632 if (conn->sent == ~0)
3633 conn->sent = 0;
3638 static void hci_sched_esco(struct hci_dev *hdev)
3640 struct hci_conn *conn;
3641 struct sk_buff *skb;
3642 int quote;
3644 BT_DBG("%s", hdev->name);
3646 if (!hci_conn_num(hdev, ESCO_LINK))
3647 return;
3649 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3650 &quote))) {
3651 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3652 BT_DBG("skb %p len %d", skb, skb->len);
3653 hci_send_frame(hdev, skb);
3655 conn->sent++;
3656 if (conn->sent == ~0)
3657 conn->sent = 0;
3662 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3664 unsigned int cnt = hdev->acl_cnt;
3665 struct hci_chan *chan;
3666 struct sk_buff *skb;
3667 int quote;
3669 __check_timeout(hdev, cnt, ACL_LINK);
3671 while (hdev->acl_cnt &&
3672 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3673 u32 priority = (skb_peek(&chan->data_q))->priority;
3674 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3675 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3676 skb->len, skb->priority);
3678 /* Stop if priority has changed */
3679 if (skb->priority < priority)
3680 break;
3682 skb = skb_dequeue(&chan->data_q);
3684 hci_conn_enter_active_mode(chan->conn,
3685 bt_cb(skb)->force_active);
3687 hci_send_frame(hdev, skb);
3688 hdev->acl_last_tx = jiffies;
3690 hdev->acl_cnt--;
3691 chan->sent++;
3692 chan->conn->sent++;
3694 /* Send pending SCO packets right away */
3695 hci_sched_sco(hdev);
3696 hci_sched_esco(hdev);
3700 if (cnt != hdev->acl_cnt)
3701 hci_prio_recalculate(hdev, ACL_LINK);
3704 static void hci_sched_acl(struct hci_dev *hdev)
3706 BT_DBG("%s", hdev->name);
3708 /* Nothing to schedule if there are no ACL links */
3709 if (!hci_conn_num(hdev, ACL_LINK))
3710 return;
3712 hci_sched_acl_pkt(hdev);
3715 static void hci_sched_le(struct hci_dev *hdev)
3717 struct hci_chan *chan;
3718 struct sk_buff *skb;
3719 int quote, cnt, tmp;
3721 BT_DBG("%s", hdev->name);
3723 if (!hci_conn_num(hdev, LE_LINK))
3724 return;
3726 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3728 __check_timeout(hdev, cnt, LE_LINK);
3730 tmp = cnt;
3731 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3732 u32 priority = (skb_peek(&chan->data_q))->priority;
3733 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3734 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3735 skb->len, skb->priority);
3737 /* Stop if priority has changed */
3738 if (skb->priority < priority)
3739 break;
3741 skb = skb_dequeue(&chan->data_q);
3743 hci_send_frame(hdev, skb);
3744 hdev->le_last_tx = jiffies;
3746 cnt--;
3747 chan->sent++;
3748 chan->conn->sent++;
3750 /* Send pending SCO packets right away */
3751 hci_sched_sco(hdev);
3752 hci_sched_esco(hdev);
3756 if (hdev->le_pkts)
3757 hdev->le_cnt = cnt;
3758 else
3759 hdev->acl_cnt = cnt;
3761 if (cnt != tmp)
3762 hci_prio_recalculate(hdev, LE_LINK);
3765 /* Schedule CIS */
3766 static void hci_sched_iso(struct hci_dev *hdev)
3768 struct hci_conn *conn;
3769 struct sk_buff *skb;
3770 int quote, *cnt;
3772 BT_DBG("%s", hdev->name);
3774 if (!hci_conn_num(hdev, ISO_LINK))
3775 return;
3777 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3778 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3779 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3780 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3781 BT_DBG("skb %p len %d", skb, skb->len);
3782 hci_send_frame(hdev, skb);
3784 conn->sent++;
3785 if (conn->sent == ~0)
3786 conn->sent = 0;
3787 (*cnt)--;
3792 static void hci_tx_work(struct work_struct *work)
3794 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3795 struct sk_buff *skb;
3797 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3798 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3800 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3801 /* Schedule queues and send stuff to HCI driver */
3802 hci_sched_sco(hdev);
3803 hci_sched_esco(hdev);
3804 hci_sched_iso(hdev);
3805 hci_sched_acl(hdev);
3806 hci_sched_le(hdev);
3809 /* Send next queued raw (unknown type) packet */
3810 while ((skb = skb_dequeue(&hdev->raw_q)))
3811 hci_send_frame(hdev, skb);
3814 /* ----- HCI RX task (incoming data processing) ----- */
3816 /* ACL data packet */
3817 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3819 struct hci_acl_hdr *hdr = (void *) skb->data;
3820 struct hci_conn *conn;
3821 __u16 handle, flags;
3823 skb_pull(skb, HCI_ACL_HDR_SIZE);
3825 handle = __le16_to_cpu(hdr->handle);
3826 flags = hci_flags(handle);
3827 handle = hci_handle(handle);
3829 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3830 handle, flags);
3832 hdev->stat.acl_rx++;
3834 hci_dev_lock(hdev);
3835 conn = hci_conn_hash_lookup_handle(hdev, handle);
3836 hci_dev_unlock(hdev);
3838 if (conn) {
3839 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3841 /* Send to upper protocol */
3842 l2cap_recv_acldata(conn, skb, flags);
3843 return;
3844 } else {
3845 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3846 handle);
3849 kfree_skb(skb);
3852 /* SCO data packet */
3853 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3855 struct hci_sco_hdr *hdr = (void *) skb->data;
3856 struct hci_conn *conn;
3857 __u16 handle, flags;
3859 skb_pull(skb, HCI_SCO_HDR_SIZE);
3861 handle = __le16_to_cpu(hdr->handle);
3862 flags = hci_flags(handle);
3863 handle = hci_handle(handle);
3865 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3866 handle, flags);
3868 hdev->stat.sco_rx++;
3870 hci_dev_lock(hdev);
3871 conn = hci_conn_hash_lookup_handle(hdev, handle);
3872 hci_dev_unlock(hdev);
3874 if (conn) {
3875 /* Send to upper protocol */
3876 hci_skb_pkt_status(skb) = flags & 0x03;
3877 sco_recv_scodata(conn, skb);
3878 return;
3879 } else {
3880 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3881 handle);
3884 kfree_skb(skb);
3887 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3889 struct hci_iso_hdr *hdr;
3890 struct hci_conn *conn;
3891 __u16 handle, flags;
3893 hdr = skb_pull_data(skb, sizeof(*hdr));
3894 if (!hdr) {
3895 bt_dev_err(hdev, "ISO packet too small");
3896 goto drop;
3899 handle = __le16_to_cpu(hdr->handle);
3900 flags = hci_flags(handle);
3901 handle = hci_handle(handle);
3903 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3904 handle, flags);
3906 hci_dev_lock(hdev);
3907 conn = hci_conn_hash_lookup_handle(hdev, handle);
3908 hci_dev_unlock(hdev);
3910 if (!conn) {
3911 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3912 handle);
3913 goto drop;
3916 /* Send to upper protocol */
3917 iso_recv(conn, skb, flags);
3918 return;
3920 drop:
3921 kfree_skb(skb);
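/* The current request is complete when the command queue is empty or the
 * next queued command is flagged as the start of a new request.
 */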
3924 static bool hci_req_is_complete(struct hci_dev *hdev)
3926 struct sk_buff *skb;
3928 skb = skb_peek(&hdev->cmd_q);
3929 if (!skb)
3930 return true;
3932 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
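/* Requeue a clone of the last command we sent, unless it was HCI Reset.
 * Used when a spontaneous Reset Complete from the controller means the
 * command that is actually pending will never be completed.
 */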
3935 static void hci_resend_last(struct hci_dev *hdev)
3937 struct hci_command_hdr *sent;
3938 struct sk_buff *skb;
3939 u16 opcode;
3941 if (!hdev->sent_cmd)
3942 return;
3944 sent = (void *) hdev->sent_cmd->data;
3945 opcode = __le16_to_cpu(sent->opcode);
3946 if (opcode == HCI_OP_RESET)
3947 return;
3949 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3950 if (!skb)
3951 return;
3953 skb_queue_head(&hdev->cmd_q, skb);
3954 queue_work(hdev->workqueue, &hdev->cmd_work);
3957 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3958 hci_req_complete_t *req_complete,
3959 hci_req_complete_skb_t *req_complete_skb)
3961 struct sk_buff *skb;
3962 unsigned long flags;
3964 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3966 /* If the completed command doesn't match the last one that was
3967 * sent we need to do special handling of it.
3969 if (!hci_sent_cmd_data(hdev, opcode)) {
3970 /* Some CSR based controllers generate a spontaneous
3971 * reset complete event during init and any pending
3972 * command will never be completed. In such a case we
3973 * need to resend whatever was the last sent
3974 * command.
3976 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3977 hci_resend_last(hdev);
3979 return;
3982 /* If we reach this point this event matches the last command sent */
3983 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3985 /* If the command succeeded and there's still more commands in
3986 * this request the request is not yet complete.
3988 if (!status && !hci_req_is_complete(hdev))
3989 return;
3991 skb = hdev->req_skb;
3993 /* If this was the last command in a request the complete
3994 * callback would be found in hdev->req_skb instead of the
3995 * command queue (hdev->cmd_q).
3997 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3998 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3999 return;
4002 if (skb && bt_cb(skb)->hci.req_complete) {
4003 *req_complete = bt_cb(skb)->hci.req_complete;
4004 return;
4007 /* Remove all pending commands belonging to this request */
4008 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4009 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4010 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4011 __skb_queue_head(&hdev->cmd_q, skb);
4012 break;
4015 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4016 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4017 else
4018 *req_complete = bt_cb(skb)->hci.req_complete;
4019 dev_kfree_skb_irq(skb);
4021 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4024 static void hci_rx_work(struct work_struct *work)
4026 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4027 struct sk_buff *skb;
4029 BT_DBG("%s", hdev->name);
4031 /* The kcov_remote functions are used to collect packet-parsing
4032 * coverage from this background thread and to associate it with
4033 * the thread of the syscall that originally injected the packet.
4034 * This helps with fuzzing the kernel.
4036 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4037 kcov_remote_start_common(skb_get_kcov_handle(skb));
4039 /* Send copy to monitor */
4040 hci_send_to_monitor(hdev, skb);
4042 if (atomic_read(&hdev->promisc)) {
4043 /* Send copy to the sockets */
4044 hci_send_to_sock(hdev, skb);
4047 /* If the device has been opened in HCI_USER_CHANNEL,
4048 * userspace has exclusive access to the device.
4049 * While the device is in HCI_INIT we still need to pass
4050 * the data packets on to the driver in order
4051 * to complete its setup().
4053 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4054 !test_bit(HCI_INIT, &hdev->flags)) {
4055 kfree_skb(skb);
4056 continue;
4059 if (test_bit(HCI_INIT, &hdev->flags)) {
4060 /* Don't process data packets in this state. */
4061 switch (hci_skb_pkt_type(skb)) {
4062 case HCI_ACLDATA_PKT:
4063 case HCI_SCODATA_PKT:
4064 case HCI_ISODATA_PKT:
4065 kfree_skb(skb);
4066 continue;
4070 /* Process frame */
4071 switch (hci_skb_pkt_type(skb)) {
4072 case HCI_EVENT_PKT:
4073 BT_DBG("%s Event packet", hdev->name);
4074 hci_event_packet(hdev, skb);
4075 break;
4077 case HCI_ACLDATA_PKT:
4078 BT_DBG("%s ACL data packet", hdev->name);
4079 hci_acldata_packet(hdev, skb);
4080 break;
4082 case HCI_SCODATA_PKT:
4083 BT_DBG("%s SCO data packet", hdev->name);
4084 hci_scodata_packet(hdev, skb);
4085 break;
4087 case HCI_ISODATA_PKT:
4088 BT_DBG("%s ISO data packet", hdev->name);
4089 hci_isodata_packet(hdev, skb);
4090 break;
4092 default:
4093 kfree_skb(skb);
4094 break;
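/* Hand one queued command to the driver: keep a clone in hdev->sent_cmd
 * (and in hdev->req_skb when a request is waiting on it), then consume
 * one command credit. If cloning fails the command is requeued and the
 * work item rescheduled.
 */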
4099 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4101 int err;
4103 bt_dev_dbg(hdev, "skb %p", skb);
4105 kfree_skb(hdev->sent_cmd);
4107 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4108 if (!hdev->sent_cmd) {
4109 skb_queue_head(&hdev->cmd_q, skb);
4110 queue_work(hdev->workqueue, &hdev->cmd_work);
4111 return;
4114 err = hci_send_frame(hdev, skb);
4115 if (err < 0) {
4116 hci_cmd_sync_cancel_sync(hdev, -err);
4117 return;
4120 if (hci_req_status_pend(hdev) &&
4121 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4122 kfree_skb(hdev->req_skb);
4123 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4126 atomic_dec(&hdev->cmd_cnt);
4129 static void hci_cmd_work(struct work_struct *work)
4131 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4132 struct sk_buff *skb;
4134 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4135 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4137 /* Send queued commands */
4138 if (atomic_read(&hdev->cmd_cnt)) {
4139 skb = skb_dequeue(&hdev->cmd_q);
4140 if (!skb)
4141 return;
4143 hci_send_cmd_sync(hdev, skb);
4145 rcu_read_lock();
4146 if (test_bit(HCI_RESET, &hdev->flags) ||
4147 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4148 cancel_delayed_work(&hdev->cmd_timer);
4149 else
4150 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4151 HCI_CMD_TIMEOUT);
4152 rcu_read_unlock();