Bluetooth: Fix quirks that are valid during setup driver callback
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "smp.h"
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
59 static void hci_notify(struct hci_dev *hdev, int event)
61 hci_sock_dev_event(hdev, event);
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
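/* Writing a boolean string to this debugfs entry toggles Device Under Test
 * mode: enabling sends HCI_OP_ENABLE_DUT_MODE, disabling issues an HCI
 * reset. The controller must be up (HCI_UP). A typical use from user space
 * (assuming debugfs is mounted at /sys/kernel/debug) is:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */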
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
83 char buf[32];
84 size_t buf_size = min(count, (sizeof(buf)-1));
85 bool enable;
86 int err;
88 if (!test_bit(HCI_UP, &hdev->flags))
89 return -ENETDOWN;
91 if (copy_from_user(buf, user_buf, buf_size))
92 return -EFAULT;
94 buf[buf_size] = '\0';
95 if (strtobool(buf, &enable))
96 return -EINVAL;
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
99 return -EALREADY;
101 hci_req_lock(hdev);
102 if (enable)
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 HCI_CMD_TIMEOUT);
105 else
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 hci_req_unlock(hdev);
110 if (IS_ERR(skb))
111 return PTR_ERR(skb);
113 err = -bt_to_errno(skb->data[0]);
114 kfree_skb(skb);
116 if (err < 0)
117 return err;
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
121 return count;
124 static const struct file_operations dut_mode_fops = {
125 .open = simple_open,
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
134 u8 p;
136 hci_dev_lock(hdev);
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
154 return 0;
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
174 hci_dev_lock(hdev);
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
179 return 0;
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
189 .read = seq_read,
190 .llseek = seq_lseek,
191 .release = single_release,
194 static int uuids_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
199 hci_dev_lock(hdev);
200 list_for_each_entry(uuid, &hdev->uuids, list) {
201 u8 i, val[16];
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
210 seq_printf(f, "%pUb\n", val);
212 hci_dev_unlock(hdev);
214 return 0;
217 static int uuids_open(struct inode *inode, struct file *file)
219 return single_open(file, uuids_show, inode->i_private);
222 static const struct file_operations uuids_fops = {
223 .open = uuids_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = single_release,
229 static int inquiry_cache_show(struct seq_file *f, void *p)
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
235 hci_dev_lock(hdev);
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 &data->bdaddr,
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
248 hci_dev_unlock(hdev);
250 return 0;
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
255 return single_open(file, inquiry_cache_show, inode->i_private);
258 static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
260 .read = seq_read,
261 .llseek = seq_lseek,
262 .release = single_release,
265 static int link_keys_show(struct seq_file *f, void *ptr)
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
270 hci_dev_lock(hdev);
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
276 hci_dev_unlock(hdev);
278 return 0;
281 static int link_keys_open(struct inode *inode, struct file *file)
283 return single_open(file, link_keys_show, inode->i_private);
286 static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
288 .read = seq_read,
289 .llseek = seq_lseek,
290 .release = single_release,
293 static int dev_class_show(struct seq_file *f, void *ptr)
295 struct hci_dev *hdev = f->private;
297 hci_dev_lock(hdev);
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
302 return 0;
305 static int dev_class_open(struct inode *inode, struct file *file)
307 return single_open(file, dev_class_show, inode->i_private);
310 static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
312 .read = seq_read,
313 .llseek = seq_lseek,
314 .release = single_release,
317 static int voice_setting_get(void *data, u64 *val)
319 struct hci_dev *hdev = data;
321 hci_dev_lock(hdev);
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
325 return 0;
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
331 static int auto_accept_delay_set(void *data, u64 val)
333 struct hci_dev *hdev = data;
335 hci_dev_lock(hdev);
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
339 return 0;
342 static int auto_accept_delay_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
346 hci_dev_lock(hdev);
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
350 return 0;
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
359 struct hci_dev *hdev = file->private_data;
360 char buf[3];
362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
363 buf[1] = '\n';
364 buf[2] = '\0';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
368 static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
372 struct hci_dev *hdev = file->private_data;
373 char buf[32];
374 size_t buf_size = min(count, (sizeof(buf)-1));
375 bool enable;
377 if (test_bit(HCI_UP, &hdev->flags))
378 return -EBUSY;
380 if (copy_from_user(buf, user_buf, buf_size))
381 return -EFAULT;
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
385 return -EINVAL;
387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
388 return -EALREADY;
390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
392 return count;
395 static const struct file_operations force_sc_support_fops = {
396 .open = simple_open,
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
405 struct hci_dev *hdev = file->private_data;
406 char buf[3];
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
409 buf[1] = '\n';
410 buf[2] = '\0';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414 static const struct file_operations sc_only_mode_fops = {
415 .open = simple_open,
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
420 static int idle_timeout_set(void *data, u64 val)
422 struct hci_dev *hdev = data;
424 if (val != 0 && (val < 500 || val > 3600000))
425 return -EINVAL;
427 hci_dev_lock(hdev);
428 hdev->idle_timeout = val;
429 hci_dev_unlock(hdev);
431 return 0;
434 static int idle_timeout_get(void *data, u64 *val)
436 struct hci_dev *hdev = data;
438 hci_dev_lock(hdev);
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
442 return 0;
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
448 static int rpa_timeout_set(void *data, u64 val)
450 struct hci_dev *hdev = data;
452 /* Require the RPA timeout to be at least 30 seconds and at most
453 * 24 hours.
455 if (val < 30 || val > (60 * 60 * 24))
456 return -EINVAL;
458 hci_dev_lock(hdev);
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
462 return 0;
465 static int rpa_timeout_get(void *data, u64 *val)
467 struct hci_dev *hdev = data;
469 hci_dev_lock(hdev);
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
473 return 0;
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
479 static int sniff_min_interval_set(void *data, u64 val)
481 struct hci_dev *hdev = data;
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
484 return -EINVAL;
486 hci_dev_lock(hdev);
487 hdev->sniff_min_interval = val;
488 hci_dev_unlock(hdev);
490 return 0;
493 static int sniff_min_interval_get(void *data, u64 *val)
495 struct hci_dev *hdev = data;
497 hci_dev_lock(hdev);
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
501 return 0;
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
507 static int sniff_max_interval_set(void *data, u64 val)
509 struct hci_dev *hdev = data;
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
512 return -EINVAL;
514 hci_dev_lock(hdev);
515 hdev->sniff_max_interval = val;
516 hci_dev_unlock(hdev);
518 return 0;
521 static int sniff_max_interval_get(void *data, u64 *val)
523 struct hci_dev *hdev = data;
525 hci_dev_lock(hdev);
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
529 return 0;
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
535 static int conn_info_min_age_set(void *data, u64 val)
537 struct hci_dev *hdev = data;
539 if (val == 0 || val > hdev->conn_info_max_age)
540 return -EINVAL;
542 hci_dev_lock(hdev);
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
546 return 0;
549 static int conn_info_min_age_get(void *data, u64 *val)
551 struct hci_dev *hdev = data;
553 hci_dev_lock(hdev);
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
557 return 0;
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
563 static int conn_info_max_age_set(void *data, u64 val)
565 struct hci_dev *hdev = data;
567 if (val == 0 || val < hdev->conn_info_min_age)
568 return -EINVAL;
570 hci_dev_lock(hdev);
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
574 return 0;
577 static int conn_info_max_age_get(void *data, u64 *val)
579 struct hci_dev *hdev = data;
581 hci_dev_lock(hdev);
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
585 return 0;
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
591 static int identity_show(struct seq_file *f, void *p)
593 struct hci_dev *hdev = f->private;
594 bdaddr_t addr;
595 u8 addr_type;
597 hci_dev_lock(hdev);
599 hci_copy_identity_address(hdev, &addr, &addr_type);
601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602 16, hdev->irk, &hdev->rpa);
604 hci_dev_unlock(hdev);
606 return 0;
609 static int identity_open(struct inode *inode, struct file *file)
611 return single_open(file, identity_show, inode->i_private);
614 static const struct file_operations identity_fops = {
615 .open = identity_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = single_release,
621 static int random_address_show(struct seq_file *f, void *p)
623 struct hci_dev *hdev = f->private;
625 hci_dev_lock(hdev);
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
629 return 0;
632 static int random_address_open(struct inode *inode, struct file *file)
634 return single_open(file, random_address_show, inode->i_private);
637 static const struct file_operations random_address_fops = {
638 .open = random_address_open,
639 .read = seq_read,
640 .llseek = seq_lseek,
641 .release = single_release,
644 static int static_address_show(struct seq_file *f, void *p)
646 struct hci_dev *hdev = f->private;
648 hci_dev_lock(hdev);
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
652 return 0;
655 static int static_address_open(struct inode *inode, struct file *file)
657 return single_open(file, static_address_show, inode->i_private);
660 static const struct file_operations static_address_fops = {
661 .open = static_address_open,
662 .read = seq_read,
663 .llseek = seq_lseek,
664 .release = single_release,
667 static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
671 struct hci_dev *hdev = file->private_data;
672 char buf[3];
674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
675 buf[1] = '\n';
676 buf[2] = '\0';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
680 static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
684 struct hci_dev *hdev = file->private_data;
685 char buf[32];
686 size_t buf_size = min(count, (sizeof(buf)-1));
687 bool enable;
689 if (test_bit(HCI_UP, &hdev->flags))
690 return -EBUSY;
692 if (copy_from_user(buf, user_buf, buf_size))
693 return -EFAULT;
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
697 return -EINVAL;
699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
700 return -EALREADY;
702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
704 return count;
707 static const struct file_operations force_static_address_fops = {
708 .open = simple_open,
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
714 static int white_list_show(struct seq_file *f, void *ptr)
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
719 hci_dev_lock(hdev);
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
724 return 0;
727 static int white_list_open(struct inode *inode, struct file *file)
729 return single_open(file, white_list_show, inode->i_private);
732 static const struct file_operations white_list_fops = {
733 .open = white_list_open,
734 .read = seq_read,
735 .llseek = seq_lseek,
736 .release = single_release,
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
744 hci_dev_lock(hdev);
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
751 hci_dev_unlock(hdev);
753 return 0;
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
758 return single_open(file, identity_resolving_keys_show,
759 inode->i_private);
762 static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
764 .read = seq_read,
765 .llseek = seq_lseek,
766 .release = single_release,
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
774 hci_dev_lock(hdev);
775 list_for_each_safe(p, n, &hdev->long_term_keys) {
776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780 __le64_to_cpu(ltk->rand), 16, ltk->val);
782 hci_dev_unlock(hdev);
784 return 0;
787 static int long_term_keys_open(struct inode *inode, struct file *file)
789 return single_open(file, long_term_keys_show, inode->i_private);
792 static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
794 .read = seq_read,
795 .llseek = seq_lseek,
796 .release = single_release,
799 static int conn_min_interval_set(void *data, u64 val)
801 struct hci_dev *hdev = data;
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
804 return -EINVAL;
806 hci_dev_lock(hdev);
807 hdev->le_conn_min_interval = val;
808 hci_dev_unlock(hdev);
810 return 0;
813 static int conn_min_interval_get(void *data, u64 *val)
815 struct hci_dev *hdev = data;
817 hci_dev_lock(hdev);
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
821 return 0;
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
827 static int conn_max_interval_set(void *data, u64 val)
829 struct hci_dev *hdev = data;
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
832 return -EINVAL;
834 hci_dev_lock(hdev);
835 hdev->le_conn_max_interval = val;
836 hci_dev_unlock(hdev);
838 return 0;
841 static int conn_max_interval_get(void *data, u64 *val)
843 struct hci_dev *hdev = data;
845 hci_dev_lock(hdev);
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
849 return 0;
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
855 static int conn_latency_set(void *data, u64 val)
857 struct hci_dev *hdev = data;
859 if (val > 0x01f3)
860 return -EINVAL;
862 hci_dev_lock(hdev);
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
866 return 0;
869 static int conn_latency_get(void *data, u64 *val)
871 struct hci_dev *hdev = data;
873 hci_dev_lock(hdev);
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
877 return 0;
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
883 static int supervision_timeout_set(void *data, u64 val)
885 struct hci_dev *hdev = data;
887 if (val < 0x000a || val > 0x0c80)
888 return -EINVAL;
890 hci_dev_lock(hdev);
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
894 return 0;
897 static int supervision_timeout_get(void *data, u64 *val)
899 struct hci_dev *hdev = data;
901 hci_dev_lock(hdev);
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
905 return 0;
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
911 static int adv_channel_map_set(void *data, u64 val)
913 struct hci_dev *hdev = data;
915 if (val < 0x01 || val > 0x07)
916 return -EINVAL;
918 hci_dev_lock(hdev);
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
922 return 0;
925 static int adv_channel_map_get(void *data, u64 *val)
927 struct hci_dev *hdev = data;
929 hci_dev_lock(hdev);
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
933 return 0;
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
939 static int device_list_show(struct seq_file *f, void *ptr)
941 struct hci_dev *hdev = f->private;
942 struct hci_conn_params *p;
944 hci_dev_lock(hdev);
945 list_for_each_entry(p, &hdev->le_conn_params, list) {
946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
947 p->auto_connect);
949 hci_dev_unlock(hdev);
951 return 0;
954 static int device_list_open(struct inode *inode, struct file *file)
956 return single_open(file, device_list_show, inode->i_private);
959 static const struct file_operations device_list_fops = {
960 .open = device_list_open,
961 .read = seq_read,
962 .llseek = seq_lseek,
963 .release = single_release,
966 /* ---- HCI requests ---- */
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
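/* Take the last received event out of hdev->recv_evt and verify that it is
 * what the synchronous command expects: either the requested event code, or
 * a Command Complete event carrying the original opcode. On any mismatch
 * the skb is freed and ERR_PTR(-ENODATA) is returned.
 */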
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
991 u8 event)
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
995 struct sk_buff *skb;
997 hci_dev_lock(hdev);
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1002 hci_dev_unlock(hdev);
1004 if (!skb)
1005 return ERR_PTR(-ENODATA);
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1009 goto failed;
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1015 if (event) {
1016 if (hdr->evt != event)
1017 goto failed;
1018 return skb;
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1023 goto failed;
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1028 goto failed;
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1034 if (opcode == __le16_to_cpu(ev->opcode))
1035 return skb;
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1040 failed:
1041 kfree_skb(skb);
1042 return ERR_PTR(-ENODATA);
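/* Send a single HCI command and sleep (interruptibly) until it completes,
 * is canceled or times out. On success the skb carrying the command's
 * completion event is returned; the caller owns it and must free it.
 */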
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046 const void *param, u8 event, u32 timeout)
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1050 int err = 0;
1052 BT_DBG("%s", hdev->name);
1054 hci_req_init(&req, hdev);
1056 hci_req_add_ev(&req, opcode, plen, param, event);
1058 hdev->req_status = HCI_REQ_PEND;
1060 err = hci_req_run(&req, hci_req_sync_complete);
1061 if (err < 0)
1062 return ERR_PTR(err);
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1067 schedule_timeout(timeout);
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1074 switch (hdev->req_status) {
1075 case HCI_REQ_DONE:
1076 err = -bt_to_errno(hdev->req_result);
1077 break;
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1081 break;
1083 default:
1084 err = -ETIMEDOUT;
1085 break;
1088 hdev->req_status = hdev->req_result = 0;
1090 BT_DBG("%s end: err %d", hdev->name, err);
1092 if (err < 0)
1093 return ERR_PTR(err);
1095 return hci_get_cmd_complete(hdev, opcode, event);
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100 const void *param, u32 timeout)
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1104 EXPORT_SYMBOL(__hci_cmd_sync);
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108 void (*func)(struct hci_request *req,
1109 unsigned long opt),
1110 unsigned long opt, __u32 timeout)
1112 struct hci_request req;
1113 DECLARE_WAITQUEUE(wait, current);
1114 int err = 0;
1116 BT_DBG("%s start", hdev->name);
1118 hci_req_init(&req, hdev);
1120 hdev->req_status = HCI_REQ_PEND;
1122 func(&req, opt);
1124 err = hci_req_run(&req, hci_req_sync_complete);
1125 if (err < 0) {
1126 hdev->req_status = 0;
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
1133 if (err == -ENODATA)
1134 return 0;
1136 return err;
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1142 schedule_timeout(timeout);
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1146 if (signal_pending(current))
1147 return -EINTR;
1149 switch (hdev->req_status) {
1150 case HCI_REQ_DONE:
1151 err = -bt_to_errno(hdev->req_result);
1152 break;
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1156 break;
1158 default:
1159 err = -ETIMEDOUT;
1160 break;
1163 hdev->req_status = hdev->req_result = 0;
1165 BT_DBG("%s end: err %d", hdev->name, err);
1167 return err;
1170 static int hci_req_sync(struct hci_dev *hdev,
1171 void (*req)(struct hci_request *req,
1172 unsigned long opt),
1173 unsigned long opt, __u32 timeout)
1175 int ret;
1177 if (!test_bit(HCI_UP, &hdev->flags))
1178 return -ENETDOWN;
1180 /* Serialize all requests */
1181 hci_req_lock(hdev);
1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1183 hci_req_unlock(hdev);
1185 return ret;
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1190 BT_DBG("%s %ld", req->hdev->name, opt);
1192 /* Reset device */
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1197 static void bredr_init(struct hci_request *req)
1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1201 /* Read Local Supported Features */
1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1204 /* Read Local Version */
1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1207 /* Read BD Address */
1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1211 static void amp_init(struct hci_request *req)
1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1215 /* Read Local Version */
1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1224 /* Read Local AMP Info */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1227 /* Read Data Blk size */
1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
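/* Stage 1 of controller init: an optional HCI reset followed by the basic
 * commands for the transport type (BR/EDR or AMP) defined above.
 */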
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1239 struct hci_dev *hdev = req->hdev;
1241 BT_DBG("%s %ld", hdev->name, opt);
1243 /* Reset */
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245 hci_reset_req(req, 0);
1247 switch (hdev->dev_type) {
1248 case HCI_BREDR:
1249 bredr_init(req);
1250 break;
1252 case HCI_AMP:
1253 amp_init(req);
1254 break;
1256 default:
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1258 break;
1262 static void bredr_setup(struct hci_request *req)
1264 struct hci_dev *hdev = req->hdev;
1266 __le16 param;
1267 __u8 flt_type;
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1272 /* Read Class of Device */
1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1275 /* Read Local Name */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1278 /* Read Voice Setting */
1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1291 /* Connection accept timeout ~20 secs */
1292 param = cpu_to_le16(0x7d00);
1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1304 static void le_setup(struct hci_request *req)
1306 struct hci_dev *hdev = req->hdev;
1308 /* Read LE Buffer Size */
1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1311 /* Read LE Local Supported Features */
1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1317 /* Read LE Advertising Channel TX Power */
1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1320 /* Read LE White List Size */
1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
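/* Pick the Write Inquiry Mode parameter: 0x02 for inquiry results with RSSI
 * or extended format, 0x01 for inquiry results with RSSI, 0x00 for the
 * standard format. The manufacturer/revision checks force RSSI mode on
 * controllers whose feature bits are known to be unreliable.
 */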
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1333 if (lmp_ext_inq_capable(hdev))
1334 return 0x02;
1336 if (lmp_inq_rssi_capable(hdev))
1337 return 0x01;
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1341 return 0x01;
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345 return 0x01;
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347 return 0x01;
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349 return 0x01;
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1354 return 0x01;
1356 return 0x00;
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1361 u8 mode;
1363 mode = hci_get_inquiry_mode(req->hdev);
1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1368 static void hci_setup_event_mask(struct hci_request *req)
1370 struct hci_dev *hdev = req->hdev;
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1378 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1379 * any event mask for pre-1.2 devices.

1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382 return;
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
1390 } else {
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
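/* Stage 2 of controller init: BR/EDR and LE basic setup, the event mask
 * and, where supported, SSP/EIR, inquiry mode, extended features and link
 * level authentication.
 */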
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1444 struct hci_dev *hdev = req->hdev;
1446 if (lmp_bredr_capable(hdev))
1447 bredr_setup(req);
1448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1451 if (lmp_le_capable(hdev))
1452 le_setup(req);
1454 hci_setup_event_mask(req);
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1462 if (lmp_ssp_capable(hdev)) {
1463 /* When SSP is available, the host features page
1464 * should be available as well. However, some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1469 hdev->max_page = 0x01;
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
1475 } else {
1476 struct hci_cp_write_eir cp;
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1485 if (lmp_inq_rssi_capable(hdev))
1486 hci_setup_inquiry_mode(req);
1488 if (lmp_inq_tx_pwr_capable(hdev))
1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1494 cp.page = 0x01;
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
1506 static void hci_setup_link_policy(struct hci_request *req)
1508 struct hci_dev *hdev = req->hdev;
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1521 cp.policy = cpu_to_le16(link_policy);
1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
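/* Mirror the HCI_LE_ENABLED flag into the controller's LE Host Support
 * setting. Only dual-mode controllers need this, and the command is only
 * sent when the value would actually change.
 */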
1525 static void hci_set_le_support(struct hci_request *req)
1527 struct hci_dev *hdev = req->hdev;
1528 struct hci_cp_write_le_host_supported cp;
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1534 memset(&cp, 0, sizeof(cp));
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1541 if (cp.le != lmp_host_le_capable(hdev))
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1551 /* If Connectionless Slave Broadcast master role is supported,
1552 * enable all necessary events for it.
1554 if (lmp_csb_master_capable(hdev)) {
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1561 /* If Connectionless Slave Broadcast slave role is supported,
1562 * enable all necessary events for it.
1564 if (lmp_csb_slave_capable(hdev)) {
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1580 struct hci_dev *hdev = req->hdev;
1581 u8 p;
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1587 * Check the supported commands and only if the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
1592 * Some controllers indicate that they support deleting stored
1593 * link keys, but in fact they don't. The quirk lets a driver
1594 * just disable this command.
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598 struct hci_cp_delete_stored_link_key cp;
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603 sizeof(cp), &cp);
1606 if (hdev->commands[5] & 0x10)
1607 hci_setup_link_policy(req);
1609 if (lmp_le_capable(hdev)) {
1610 u8 events[8];
1612 memset(events, 0, sizeof(events));
1613 events[0] = 0x1f;
1615 /* If the controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1620 * Parameter Request
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624 events);
1626 hci_set_le_support(req);
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1633 cp.page = p;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635 sizeof(cp), &cp);
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1641 struct hci_dev *hdev = req->hdev;
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1647 /* Check for Synchronization Train support */
1648 if (lmp_sync_train_capable(hdev))
1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1651 /* Enable Secure Connections if supported and configured */
1652 if ((lmp_sc_capable(hdev) ||
1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
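/* Run the controller init stages (all four for BR/EDR/LE controllers, only
 * stage 1 for AMP) through the synchronous request machinery. During the
 * initial setup phase this also creates the per-controller debugfs entries;
 * they are not recreated on later power-ups.
 */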
1661 static int __hci_init(struct hci_dev *hdev)
1663 int err;
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1677 /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1679 * first stage init.
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
1728 if (lmp_ssp_capable(hdev)) {
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1746 if (lmp_le_capable(hdev)) {
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
1791 return 0;
1794 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1796 __u8 scan = opt;
1798 BT_DBG("%s %x", req->hdev->name, scan);
1800 /* Inquiry and Page scans */
1801 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1804 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1806 __u8 auth = opt;
1808 BT_DBG("%s %x", req->hdev->name, auth);
1810 /* Authentication */
1811 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1814 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1816 __u8 encrypt = opt;
1818 BT_DBG("%s %x", req->hdev->name, encrypt);
1820 /* Encryption */
1821 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1824 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1826 __le16 policy = cpu_to_le16(opt);
1828 BT_DBG("%s %x", req->hdev->name, policy);
1830 /* Default link policy */
1831 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1834 /* Get HCI device by index.
1835 * Device is held on return. */
1836 struct hci_dev *hci_dev_get(int index)
1838 struct hci_dev *hdev = NULL, *d;
1840 BT_DBG("%d", index);
1842 if (index < 0)
1843 return NULL;
1845 read_lock(&hci_dev_list_lock);
1846 list_for_each_entry(d, &hci_dev_list, list) {
1847 if (d->id == index) {
1848 hdev = hci_dev_hold(d);
1849 break;
1852 read_unlock(&hci_dev_list_lock);
1853 return hdev;
1856 /* ---- Inquiry support ---- */
1858 bool hci_discovery_active(struct hci_dev *hdev)
1860 struct discovery_state *discov = &hdev->discovery;
1862 switch (discov->state) {
1863 case DISCOVERY_FINDING:
1864 case DISCOVERY_RESOLVING:
1865 return true;
1867 default:
1868 return false;
1872 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1874 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1876 if (hdev->discovery.state == state)
1877 return;
1879 switch (state) {
1880 case DISCOVERY_STOPPED:
1881 hci_update_background_scan(hdev);
1883 if (hdev->discovery.state != DISCOVERY_STARTING)
1884 mgmt_discovering(hdev, 0);
1885 break;
1886 case DISCOVERY_STARTING:
1887 break;
1888 case DISCOVERY_FINDING:
1889 mgmt_discovering(hdev, 1);
1890 break;
1891 case DISCOVERY_RESOLVING:
1892 break;
1893 case DISCOVERY_STOPPING:
1894 break;
1897 hdev->discovery.state = state;
1900 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1902 struct discovery_state *cache = &hdev->discovery;
1903 struct inquiry_entry *p, *n;
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
1907 kfree(p);
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1914 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
1917 struct discovery_state *cache = &hdev->discovery;
1918 struct inquiry_entry *e;
1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1924 return e;
1927 return NULL;
1930 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1931 bdaddr_t *bdaddr)
1933 struct discovery_state *cache = &hdev->discovery;
1934 struct inquiry_entry *e;
1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
1938 list_for_each_entry(e, &cache->unknown, list) {
1939 if (!bacmp(&e->data.bdaddr, bdaddr))
1940 return e;
1943 return NULL;
1946 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1947 bdaddr_t *bdaddr,
1948 int state)
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1962 return NULL;
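/* Re-insert the entry so the resolve list stays ordered by signal strength,
 * strongest (smallest RSSI magnitude) first; entries with a name request
 * already pending keep their position.
 */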
1965 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1966 struct inquiry_entry *ie)
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1972 list_del(&ie->list);
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
1976 abs(p->data.rssi) >= abs(ie->data.rssi))
1977 break;
1978 pos = &p->list;
1981 list_add(&ie->list, pos);
1984 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct inquiry_entry *ie;
1989 u32 flags = 0;
1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1999 if (ie) {
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2003 if (ie->name_state == NAME_NEEDED &&
2004 data->rssi != ie->data.rssi) {
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2009 goto update;
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2019 list_add(&ie->all, &cache->all);
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2028 update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
2030 ie->name_state != NAME_PENDING) {
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
2037 cache->timestamp = jiffies;
2039 if (ie->name_state == NAME_NOT_KNOWN)
2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2042 done:
2043 return flags;
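/* Copy up to num entries from the inquiry cache into buf as consecutive
 * struct inquiry_info records and return the number copied. Used by the
 * HCIINQUIRY ioctl below.
 */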
2046 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2053 list_for_each_entry(e, &cache->all, all) {
2054 struct inquiry_data *data = &e->data;
2056 if (copied >= num)
2057 break;
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
2066 info++;
2067 copied++;
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2074 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2077 struct hci_dev *hdev = req->hdev;
2078 struct hci_cp_inquiry cp;
2080 BT_DBG("%s", hdev->name);
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2092 static int wait_inquiry(void *word)
2094 schedule();
2095 return signal_pending(current);
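/* HCIINQUIRY ioctl: run a fresh inquiry if the cache is stale, empty or a
 * flush was requested, then copy the cached results back to user space.
 */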
2098 int hci_inquiry(void __user *arg)
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
2112 return -ENODEV;
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2120 err = -EOPNOTSUPP;
2121 goto done;
2124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2134 hci_dev_lock(hdev);
2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2137 hci_inquiry_cache_flush(hdev);
2138 do_inquiry = 1;
2140 hci_dev_unlock(hdev);
2142 timeo = ir.length * msecs_to_jiffies(2000);
2144 if (do_inquiry) {
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
2147 if (err < 0)
2148 goto done;
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
2158 /* For an unlimited number of responses, use a buffer with
2159 * 255 entries.
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2163 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2164 * and then copy it to user space.
2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2167 if (!buf) {
2168 err = -ENOMEM;
2169 goto done;
2172 hci_dev_lock(hdev);
2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2174 hci_dev_unlock(hdev);
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2181 ir.num_rsp))
2182 err = -EFAULT;
2183 } else
2184 err = -EFAULT;
2186 kfree(buf);
2188 done:
2189 hci_dev_put(hdev);
2190 return err;
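/* Bring the controller up: call the driver's open(), run the driver's
 * setup() callback during the HCI_SETUP phase, program a configured public
 * address via set_bdaddr() if the driver provides it (failing the power-on
 * otherwise), and finally run __hci_init() unless the device is
 * unconfigured or bound to a user channel.
 */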
2193 static int hci_dev_do_open(struct hci_dev *hdev)
2195 int ret = 0;
2197 BT_DBG("%s %p", hdev->name, hdev);
2199 hci_req_lock(hdev);
2201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202 ret = -ENODEV;
2203 goto done;
2206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211 ret = -ERFKILL;
2212 goto done;
2215 /* Check for valid public address or a configured static
2216 * random address, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2218 * or not.
2220 * In case of user channel usage, it is not important
2221 * if a public address or static random address is
2222 * available.
2224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
2229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
2232 goto done;
2236 if (test_bit(HCI_UP, &hdev->flags)) {
2237 ret = -EALREADY;
2238 goto done;
2241 if (hdev->open(hdev)) {
2242 ret = -EIO;
2243 goto done;
2246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
2249 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2250 if (hdev->setup)
2251 ret = hdev->setup(hdev);
2253 /* The transport driver can set these quirks before
2254 * creating the HCI device or in its setup callback.
2256 * In case any of them is set, the controller has to
2257 * start up as unconfigured.
2259 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2260 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2261 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2264 /* If public address change is configured, ensure that the
2265 * address gets programmed. If the driver does not support
2266 * changing the public address, fail the power on procedure.
2268 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2269 if (hdev->set_bdaddr)
2270 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2271 else
2272 ret = -EADDRNOTAVAIL;
2275 if (!ret) {
2276 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2277 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2278 ret = __hci_init(hdev);
2281 clear_bit(HCI_INIT, &hdev->flags);
2283 if (!ret) {
2284 hci_dev_hold(hdev);
2285 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2286 set_bit(HCI_UP, &hdev->flags);
2287 hci_notify(hdev, HCI_DEV_UP);
2288 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2289 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2290 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2291 hdev->dev_type == HCI_BREDR) {
2292 hci_dev_lock(hdev);
2293 mgmt_powered(hdev, 1);
2294 hci_dev_unlock(hdev);
2296 } else {
2297 /* Init failed, cleanup */
2298 flush_work(&hdev->tx_work);
2299 flush_work(&hdev->cmd_work);
2300 flush_work(&hdev->rx_work);
2302 skb_queue_purge(&hdev->cmd_q);
2303 skb_queue_purge(&hdev->rx_q);
2305 if (hdev->flush)
2306 hdev->flush(hdev);
2308 if (hdev->sent_cmd) {
2309 kfree_skb(hdev->sent_cmd);
2310 hdev->sent_cmd = NULL;
2313 hdev->close(hdev);
2314 hdev->flags &= BIT(HCI_RAW);
2317 done:
2318 hci_req_unlock(hdev);
2319 return ret;
2322 /* ---- HCI ioctl helpers ---- */
2324 int hci_dev_open(__u16 dev)
2326 struct hci_dev *hdev;
2327 int err;
2329 hdev = hci_dev_get(dev);
2330 if (!hdev)
2331 return -ENODEV;
2333 /* Devices that are marked as unconfigured can only be powered
2334 * up as user channel. Trying to bring them up as normal devices
2335 * will result in a failure. Only user channel operation is
2336 * possible.
2338 * When this function is called for a user channel, the flag
2339 * HCI_USER_CHANNEL will be set first before attempting to
2340 * open the device.
2342 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2343 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2344 err = -EOPNOTSUPP;
2345 goto done;
2348 /* We need to ensure that no other power on/off work is pending
2349 * before proceeding to call hci_dev_do_open. This is
2350 * particularly important if the setup procedure has not yet
2351 * completed.
2353 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2354 cancel_delayed_work(&hdev->power_off);
2356 /* After this call it is guaranteed that the setup procedure
2357 * has finished. This means that error conditions like RFKILL
2358 * or no valid public or static random address apply.
2360 flush_workqueue(hdev->req_workqueue);
2362 err = hci_dev_do_open(hdev);
2364 done:
2365 hci_dev_put(hdev);
2366 return err;
2369 /* This function requires the caller holds hdev->lock */
2370 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2372 struct hci_conn_params *p;
2374 list_for_each_entry(p, &hdev->le_conn_params, list)
2375 list_del_init(&p->action);
2377 BT_DBG("All LE pending actions cleared");
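/* Power off a controller: flush pending work and timers, drop all
 * queues, caches and connections, optionally send HCI_Reset when
 * HCI_QUIRK_RESET_ON_CLOSE is set and finally call the driver
 * close() callback.
 */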
2380 static int hci_dev_do_close(struct hci_dev *hdev)
2382 BT_DBG("%s %p", hdev->name, hdev);
2384 cancel_delayed_work(&hdev->power_off);
2386 hci_req_cancel(hdev, ENODEV);
2387 hci_req_lock(hdev);
2389 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2390 cancel_delayed_work_sync(&hdev->cmd_timer);
2391 hci_req_unlock(hdev);
2392 return 0;
2395 /* Flush RX and TX works */
2396 flush_work(&hdev->tx_work);
2397 flush_work(&hdev->rx_work);
2399 if (hdev->discov_timeout > 0) {
2400 cancel_delayed_work(&hdev->discov_off);
2401 hdev->discov_timeout = 0;
2402 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2403 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2406 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2407 cancel_delayed_work(&hdev->service_cache);
2409 cancel_delayed_work_sync(&hdev->le_scan_disable);
2411 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2412 cancel_delayed_work_sync(&hdev->rpa_expired);
2414 hci_dev_lock(hdev);
2415 hci_inquiry_cache_flush(hdev);
2416 hci_conn_hash_flush(hdev);
2417 hci_pend_le_actions_clear(hdev);
2418 hci_dev_unlock(hdev);
2420 hci_notify(hdev, HCI_DEV_DOWN);
2422 if (hdev->flush)
2423 hdev->flush(hdev);
2425 /* Reset device */
2426 skb_queue_purge(&hdev->cmd_q);
2427 atomic_set(&hdev->cmd_cnt, 1);
2428 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2429 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2430 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2431 set_bit(HCI_INIT, &hdev->flags);
2432 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2433 clear_bit(HCI_INIT, &hdev->flags);
2436 /* flush cmd work */
2437 flush_work(&hdev->cmd_work);
2439 /* Drop queues */
2440 skb_queue_purge(&hdev->rx_q);
2441 skb_queue_purge(&hdev->cmd_q);
2442 skb_queue_purge(&hdev->raw_q);
2444 /* Drop last sent command */
2445 if (hdev->sent_cmd) {
2446 cancel_delayed_work_sync(&hdev->cmd_timer);
2447 kfree_skb(hdev->sent_cmd);
2448 hdev->sent_cmd = NULL;
2451 kfree_skb(hdev->recv_evt);
2452 hdev->recv_evt = NULL;
2454 /* After this point our queues are empty
2455 * and no tasks are scheduled. */
2456 hdev->close(hdev);
2458 /* Clear flags */
2459 hdev->flags &= BIT(HCI_RAW);
2460 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2462 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2463 if (hdev->dev_type == HCI_BREDR) {
2464 hci_dev_lock(hdev);
2465 mgmt_powered(hdev, 0);
2466 hci_dev_unlock(hdev);
2470 /* Controller radio is available but is currently powered down */
2471 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2473 memset(hdev->eir, 0, sizeof(hdev->eir));
2474 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2475 bacpy(&hdev->random_addr, BDADDR_ANY);
2477 hci_req_unlock(hdev);
2479 hci_dev_put(hdev);
2480 return 0;
2483 int hci_dev_close(__u16 dev)
2485 struct hci_dev *hdev;
2486 int err;
2488 hdev = hci_dev_get(dev);
2489 if (!hdev)
2490 return -ENODEV;
2492 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2493 err = -EBUSY;
2494 goto done;
2497 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2498 cancel_delayed_work(&hdev->power_off);
2500 err = hci_dev_do_close(hdev);
2502 done:
2503 hci_dev_put(hdev);
2504 return err;
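/* Handler for the HCIDEVRESET ioctl. Requires the device to be up,
 * drops the RX and command queues, flushes the inquiry cache and
 * connection hash and issues a synchronous HCI_Reset.
 */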
2507 int hci_dev_reset(__u16 dev)
2509 struct hci_dev *hdev;
2510 int ret = 0;
2512 hdev = hci_dev_get(dev);
2513 if (!hdev)
2514 return -ENODEV;
2516 hci_req_lock(hdev);
2518 if (!test_bit(HCI_UP, &hdev->flags)) {
2519 ret = -ENETDOWN;
2520 goto done;
2523 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2524 ret = -EBUSY;
2525 goto done;
2528 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2529 ret = -EOPNOTSUPP;
2530 goto done;
2533 /* Drop queues */
2534 skb_queue_purge(&hdev->rx_q);
2535 skb_queue_purge(&hdev->cmd_q);
2537 hci_dev_lock(hdev);
2538 hci_inquiry_cache_flush(hdev);
2539 hci_conn_hash_flush(hdev);
2540 hci_dev_unlock(hdev);
2542 if (hdev->flush)
2543 hdev->flush(hdev);
2545 atomic_set(&hdev->cmd_cnt, 1);
2546 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2548 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2550 done:
2551 hci_req_unlock(hdev);
2552 hci_dev_put(hdev);
2553 return ret;
2556 int hci_dev_reset_stat(__u16 dev)
2558 struct hci_dev *hdev;
2559 int ret = 0;
2561 hdev = hci_dev_get(dev);
2562 if (!hdev)
2563 return -ENODEV;
2565 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2566 ret = -EBUSY;
2567 goto done;
2570 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2571 ret = -EOPNOTSUPP;
2572 goto done;
2575 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2577 done:
2578 hci_dev_put(hdev);
2579 return ret;
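/* Dispatcher for the legacy HCISET* ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, ...). Most are translated into synchronous HCI requests;
 * the MTU, link mode and packet type settings only update hdev fields.
 */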
2582 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2584 struct hci_dev *hdev;
2585 struct hci_dev_req dr;
2586 int err = 0;
2588 if (copy_from_user(&dr, arg, sizeof(dr)))
2589 return -EFAULT;
2591 hdev = hci_dev_get(dr.dev_id);
2592 if (!hdev)
2593 return -ENODEV;
2595 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2596 err = -EBUSY;
2597 goto done;
2600 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2601 err = -EOPNOTSUPP;
2602 goto done;
2605 if (hdev->dev_type != HCI_BREDR) {
2606 err = -EOPNOTSUPP;
2607 goto done;
2610 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2611 err = -EOPNOTSUPP;
2612 goto done;
2615 switch (cmd) {
2616 case HCISETAUTH:
2617 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2618 HCI_INIT_TIMEOUT);
2619 break;
2621 case HCISETENCRYPT:
2622 if (!lmp_encrypt_capable(hdev)) {
2623 err = -EOPNOTSUPP;
2624 break;
2627 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2628 /* Auth must be enabled first */
2629 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2630 HCI_INIT_TIMEOUT);
2631 if (err)
2632 break;
2635 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2636 HCI_INIT_TIMEOUT);
2637 break;
2639 case HCISETSCAN:
2640 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2641 HCI_INIT_TIMEOUT);
2642 break;
2644 case HCISETLINKPOL:
2645 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2646 HCI_INIT_TIMEOUT);
2647 break;
2649 case HCISETLINKMODE:
2650 hdev->link_mode = ((__u16) dr.dev_opt) &
2651 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2652 break;
2654 case HCISETPTYPE:
2655 hdev->pkt_type = (__u16) dr.dev_opt;
2656 break;
2658 case HCISETACLMTU:
2659 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2660 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2661 break;
2663 case HCISETSCOMTU:
2664 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2665 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2666 break;
2668 default:
2669 err = -EINVAL;
2670 break;
2673 done:
2674 hci_dev_put(hdev);
2675 return err;
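/* Handler for the HCIGETDEVLIST ioctl. Returns the id and flags of up
 * to dev_num registered controllers to user space.
 */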
2678 int hci_get_dev_list(void __user *arg)
2680 struct hci_dev *hdev;
2681 struct hci_dev_list_req *dl;
2682 struct hci_dev_req *dr;
2683 int n = 0, size, err;
2684 __u16 dev_num;
2686 if (get_user(dev_num, (__u16 __user *) arg))
2687 return -EFAULT;
2689 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2690 return -EINVAL;
2692 size = sizeof(*dl) + dev_num * sizeof(*dr);
2694 dl = kzalloc(size, GFP_KERNEL);
2695 if (!dl)
2696 return -ENOMEM;
2698 dr = dl->dev_req;
2700 read_lock(&hci_dev_list_lock);
2701 list_for_each_entry(hdev, &hci_dev_list, list) {
2702 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2703 cancel_delayed_work(&hdev->power_off);
2705 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2706 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2708 (dr + n)->dev_id = hdev->id;
2709 (dr + n)->dev_opt = hdev->flags;
2711 if (++n >= dev_num)
2712 break;
2714 read_unlock(&hci_dev_list_lock);
2716 dl->dev_num = n;
2717 size = sizeof(*dl) + n * sizeof(*dr);
2719 err = copy_to_user(arg, dl, size);
2720 kfree(dl);
2722 return err ? -EFAULT : 0;
2725 int hci_get_dev_info(void __user *arg)
2727 struct hci_dev *hdev;
2728 struct hci_dev_info di;
2729 int err = 0;
2731 if (copy_from_user(&di, arg, sizeof(di)))
2732 return -EFAULT;
2734 hdev = hci_dev_get(di.dev_id);
2735 if (!hdev)
2736 return -ENODEV;
2738 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2739 cancel_delayed_work_sync(&hdev->power_off);
2741 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2742 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2744 strcpy(di.name, hdev->name);
2745 di.bdaddr = hdev->bdaddr;
2746 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2747 di.flags = hdev->flags;
2748 di.pkt_type = hdev->pkt_type;
2749 if (lmp_bredr_capable(hdev)) {
2750 di.acl_mtu = hdev->acl_mtu;
2751 di.acl_pkts = hdev->acl_pkts;
2752 di.sco_mtu = hdev->sco_mtu;
2753 di.sco_pkts = hdev->sco_pkts;
2754 } else {
2755 di.acl_mtu = hdev->le_mtu;
2756 di.acl_pkts = hdev->le_pkts;
2757 di.sco_mtu = 0;
2758 di.sco_pkts = 0;
2760 di.link_policy = hdev->link_policy;
2761 di.link_mode = hdev->link_mode;
2763 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2764 memcpy(&di.features, &hdev->features, sizeof(di.features));
2766 if (copy_to_user(arg, &di, sizeof(di)))
2767 err = -EFAULT;
2769 hci_dev_put(hdev);
2771 return err;
2774 /* ---- Interface to HCI drivers ---- */
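/* rfkill set_block callback. Blocking the switch marks the controller
 * as rfkilled and closes it unless it is still in the setup stage;
 * unblocking only clears the flag.
 */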
2776 static int hci_rfkill_set_block(void *data, bool blocked)
2778 struct hci_dev *hdev = data;
2780 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2782 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2783 return -EBUSY;
2785 if (blocked) {
2786 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2787 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2788 hci_dev_do_close(hdev);
2789 } else {
2790 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2793 return 0;
2796 static const struct rfkill_ops hci_rfkill_ops = {
2797 .set_block = hci_rfkill_set_block,
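/* Work handler for hdev->power_on. Opens the device and then re-checks
 * the error conditions that were ignored during setup (rfkill,
 * unconfigured controller, missing address); if any still applies, the
 * device is closed again. Once setup is complete, the (Unconfigured)
 * Index Added event is sent to the management interface.
 */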
2800 static void hci_power_on(struct work_struct *work)
2802 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2803 int err;
2805 BT_DBG("%s", hdev->name);
2807 err = hci_dev_do_open(hdev);
2808 if (err < 0) {
2809 mgmt_set_powered_failed(hdev, err);
2810 return;
2813 /* During the HCI setup phase, a few error conditions are
2814 * ignored and they need to be checked now. If they are still
2815 * valid, it is important to turn the device back off.
2817 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2818 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2819 (hdev->dev_type == HCI_BREDR &&
2820 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2821 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2822 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2823 hci_dev_do_close(hdev);
2824 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2825 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2826 HCI_AUTO_OFF_TIMEOUT);
2829 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2830 /* For unconfigured devices, set the HCI_RAW flag
2831 * so that userspace can easily identify them.
2833 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2834 set_bit(HCI_RAW, &hdev->flags);
2836 /* For fully configured devices, this will send
2837 * the Index Added event. For unconfigured devices,
2838 * it will send the Unconfigured Index Added event.
2840 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2841 * and no event will be sent.
2843 mgmt_index_added(hdev);
2847 static void hci_power_off(struct work_struct *work)
2849 struct hci_dev *hdev = container_of(work, struct hci_dev,
2850 power_off.work);
2852 BT_DBG("%s", hdev->name);
2854 hci_dev_do_close(hdev);
2857 static void hci_discov_off(struct work_struct *work)
2859 struct hci_dev *hdev;
2861 hdev = container_of(work, struct hci_dev, discov_off.work);
2863 BT_DBG("%s", hdev->name);
2865 mgmt_discoverable_timeout(hdev);
2868 void hci_uuids_clear(struct hci_dev *hdev)
2870 struct bt_uuid *uuid, *tmp;
2872 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2873 list_del(&uuid->list);
2874 kfree(uuid);
2878 void hci_link_keys_clear(struct hci_dev *hdev)
2880 struct list_head *p, *n;
2882 list_for_each_safe(p, n, &hdev->link_keys) {
2883 struct link_key *key;
2885 key = list_entry(p, struct link_key, list);
2887 list_del(p);
2888 kfree(key);
2892 void hci_smp_ltks_clear(struct hci_dev *hdev)
2894 struct smp_ltk *k, *tmp;
2896 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2897 list_del(&k->list);
2898 kfree(k);
2902 void hci_smp_irks_clear(struct hci_dev *hdev)
2904 struct smp_irk *k, *tmp;
2906 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2907 list_del(&k->list);
2908 kfree(k);
2912 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2914 struct link_key *k;
2916 list_for_each_entry(k, &hdev->link_keys, list)
2917 if (bacmp(bdaddr, &k->bdaddr) == 0)
2918 return k;
2920 return NULL;
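/* Decide whether a new link key should be stored persistently, based
 * on the key type and on the local and remote authentication
 * (bonding) requirements of the connection.
 */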
2923 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2924 u8 key_type, u8 old_key_type)
2926 /* Legacy key */
2927 if (key_type < 0x03)
2928 return true;
2930 /* Debug keys are insecure so don't store them persistently */
2931 if (key_type == HCI_LK_DEBUG_COMBINATION)
2932 return false;
2934 /* Changed combination key and there's no previous one */
2935 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2936 return false;
2938 /* Security mode 3 case */
2939 if (!conn)
2940 return true;
2942 /* Neither local nor remote side had no-bonding as a requirement */
2943 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2944 return true;
2946 /* Local side had dedicated bonding as requirement */
2947 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2948 return true;
2950 /* Remote side had dedicated bonding as requirement */
2951 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2952 return true;
2954 /* If none of the above criteria match, then don't store the key
2955 * persistently */
2956 return false;
2959 static bool ltk_type_master(u8 type)
2961 return (type == SMP_LTK);
2964 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2965 bool master)
2967 struct smp_ltk *k;
2969 list_for_each_entry(k, &hdev->long_term_keys, list) {
2970 if (k->ediv != ediv || k->rand != rand)
2971 continue;
2973 if (ltk_type_master(k->type) != master)
2974 continue;
2976 return k;
2979 return NULL;
2982 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2983 u8 addr_type, bool master)
2985 struct smp_ltk *k;
2987 list_for_each_entry(k, &hdev->long_term_keys, list)
2988 if (addr_type == k->bdaddr_type &&
2989 bacmp(bdaddr, &k->bdaddr) == 0 &&
2990 ltk_type_master(k->type) == master)
2991 return k;
2993 return NULL;
2996 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2998 struct smp_irk *irk;
3000 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3001 if (!bacmp(&irk->rpa, rpa))
3002 return irk;
3005 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3006 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3007 bacpy(&irk->rpa, rpa);
3008 return irk;
3012 return NULL;
3015 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3016 u8 addr_type)
3018 struct smp_irk *irk;
3020 /* Identity Address must be public or static random */
3021 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3022 return NULL;
3024 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3025 if (addr_type == irk->addr_type &&
3026 bacmp(bdaddr, &irk->bdaddr) == 0)
3027 return irk;
3030 return NULL;
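/* Store a new BR/EDR link key or update an existing one for bdaddr.
 * Works around controllers that report a changed combination key for
 * legacy pairing without a previous key, and reports through
 * *persistent whether the key should be stored permanently.
 */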
3033 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3034 bdaddr_t *bdaddr, u8 *val, u8 type,
3035 u8 pin_len, bool *persistent)
3037 struct link_key *key, *old_key;
3038 u8 old_key_type;
3040 old_key = hci_find_link_key(hdev, bdaddr);
3041 if (old_key) {
3042 old_key_type = old_key->type;
3043 key = old_key;
3044 } else {
3045 old_key_type = conn ? conn->key_type : 0xff;
3046 key = kzalloc(sizeof(*key), GFP_KERNEL);
3047 if (!key)
3048 return NULL;
3049 list_add(&key->list, &hdev->link_keys);
3052 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3054 /* Some buggy controller combinations generate a changed
3055 * combination key for legacy pairing even when there's no
3056 * previous key */
3057 if (type == HCI_LK_CHANGED_COMBINATION &&
3058 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3059 type = HCI_LK_COMBINATION;
3060 if (conn)
3061 conn->key_type = type;
3064 bacpy(&key->bdaddr, bdaddr);
3065 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3066 key->pin_len = pin_len;
3068 if (type == HCI_LK_CHANGED_COMBINATION)
3069 key->type = old_key_type;
3070 else
3071 key->type = type;
3073 if (persistent)
3074 *persistent = hci_persistent_key(hdev, conn, type,
3075 old_key_type);
3077 return key;
3080 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3081 u8 addr_type, u8 type, u8 authenticated,
3082 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3084 struct smp_ltk *key, *old_key;
3085 bool master = ltk_type_master(type);
3087 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3088 if (old_key)
3089 key = old_key;
3090 else {
3091 key = kzalloc(sizeof(*key), GFP_KERNEL);
3092 if (!key)
3093 return NULL;
3094 list_add(&key->list, &hdev->long_term_keys);
3097 bacpy(&key->bdaddr, bdaddr);
3098 key->bdaddr_type = addr_type;
3099 memcpy(key->val, tk, sizeof(key->val));
3100 key->authenticated = authenticated;
3101 key->ediv = ediv;
3102 key->rand = rand;
3103 key->enc_size = enc_size;
3104 key->type = type;
3106 return key;
3109 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3110 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3112 struct smp_irk *irk;
3114 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3115 if (!irk) {
3116 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3117 if (!irk)
3118 return NULL;
3120 bacpy(&irk->bdaddr, bdaddr);
3121 irk->addr_type = addr_type;
3123 list_add(&irk->list, &hdev->identity_resolving_keys);
3126 memcpy(irk->val, val, 16);
3127 bacpy(&irk->rpa, rpa);
3129 return irk;
3132 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3134 struct link_key *key;
3136 key = hci_find_link_key(hdev, bdaddr);
3137 if (!key)
3138 return -ENOENT;
3140 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3142 list_del(&key->list);
3143 kfree(key);
3145 return 0;
3148 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3150 struct smp_ltk *k, *tmp;
3151 int removed = 0;
3153 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3154 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3155 continue;
3157 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3159 list_del(&k->list);
3160 kfree(k);
3161 removed++;
3164 return removed ? 0 : -ENOENT;
3167 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3169 struct smp_irk *k, *tmp;
3171 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3172 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3173 continue;
3175 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3177 list_del(&k->list);
3178 kfree(k);
3182 /* HCI command timer function */
3183 static void hci_cmd_timeout(struct work_struct *work)
3185 struct hci_dev *hdev = container_of(work, struct hci_dev,
3186 cmd_timer.work);
3188 if (hdev->sent_cmd) {
3189 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3190 u16 opcode = __le16_to_cpu(sent->opcode);
3192 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3193 } else {
3194 BT_ERR("%s command tx timeout", hdev->name);
3197 atomic_set(&hdev->cmd_cnt, 1);
3198 queue_work(hdev->workqueue, &hdev->cmd_work);
3201 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3202 bdaddr_t *bdaddr)
3204 struct oob_data *data;
3206 list_for_each_entry(data, &hdev->remote_oob_data, list)
3207 if (bacmp(bdaddr, &data->bdaddr) == 0)
3208 return data;
3210 return NULL;
3213 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3215 struct oob_data *data;
3217 data = hci_find_remote_oob_data(hdev, bdaddr);
3218 if (!data)
3219 return -ENOENT;
3221 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3223 list_del(&data->list);
3224 kfree(data);
3226 return 0;
3229 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3231 struct oob_data *data, *n;
3233 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3234 list_del(&data->list);
3235 kfree(data);
3239 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240 u8 *hash, u8 *randomizer)
3242 struct oob_data *data;
3244 data = hci_find_remote_oob_data(hdev, bdaddr);
3245 if (!data) {
3246 data = kmalloc(sizeof(*data), GFP_KERNEL);
3247 if (!data)
3248 return -ENOMEM;
3250 bacpy(&data->bdaddr, bdaddr);
3251 list_add(&data->list, &hdev->remote_oob_data);
3254 memcpy(data->hash192, hash, sizeof(data->hash192));
3255 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3257 memset(data->hash256, 0, sizeof(data->hash256));
3258 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3260 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3262 return 0;
3265 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3266 u8 *hash192, u8 *randomizer192,
3267 u8 *hash256, u8 *randomizer256)
3269 struct oob_data *data;
3271 data = hci_find_remote_oob_data(hdev, bdaddr);
3272 if (!data) {
3273 data = kmalloc(sizeof(*data), GFP_KERNEL);
3274 if (!data)
3275 return -ENOMEM;
3277 bacpy(&data->bdaddr, bdaddr);
3278 list_add(&data->list, &hdev->remote_oob_data);
3281 memcpy(data->hash192, hash192, sizeof(data->hash192));
3282 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3284 memcpy(data->hash256, hash256, sizeof(data->hash256));
3285 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3287 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3289 return 0;
3292 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3293 bdaddr_t *bdaddr, u8 type)
3295 struct bdaddr_list *b;
3297 list_for_each_entry(b, &hdev->blacklist, list) {
3298 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3299 return b;
3302 return NULL;
3305 static void hci_blacklist_clear(struct hci_dev *hdev)
3307 struct list_head *p, *n;
3309 list_for_each_safe(p, n, &hdev->blacklist) {
3310 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3312 list_del(p);
3313 kfree(b);
3317 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3319 struct bdaddr_list *entry;
3321 if (!bacmp(bdaddr, BDADDR_ANY))
3322 return -EBADF;
3324 if (hci_blacklist_lookup(hdev, bdaddr, type))
3325 return -EEXIST;
3327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3328 if (!entry)
3329 return -ENOMEM;
3331 bacpy(&entry->bdaddr, bdaddr);
3332 entry->bdaddr_type = type;
3334 list_add(&entry->list, &hdev->blacklist);
3336 return 0;
3339 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3341 struct bdaddr_list *entry;
3343 if (!bacmp(bdaddr, BDADDR_ANY)) {
3344 hci_blacklist_clear(hdev);
3345 return 0;
3348 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3349 if (!entry)
3350 return -ENOENT;
3352 list_del(&entry->list);
3353 kfree(entry);
3355 return 0;
3358 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3359 bdaddr_t *bdaddr, u8 type)
3361 struct bdaddr_list *b;
3363 list_for_each_entry(b, &hdev->le_white_list, list) {
3364 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3365 return b;
3368 return NULL;
3371 void hci_white_list_clear(struct hci_dev *hdev)
3373 struct list_head *p, *n;
3375 list_for_each_safe(p, n, &hdev->le_white_list) {
3376 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3378 list_del(p);
3379 kfree(b);
3383 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3385 struct bdaddr_list *entry;
3387 if (!bacmp(bdaddr, BDADDR_ANY))
3388 return -EBADF;
3390 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3391 if (!entry)
3392 return -ENOMEM;
3394 bacpy(&entry->bdaddr, bdaddr);
3395 entry->bdaddr_type = type;
3397 list_add(&entry->list, &hdev->le_white_list);
3399 return 0;
3402 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3404 struct bdaddr_list *entry;
3406 if (!bacmp(bdaddr, BDADDR_ANY))
3407 return -EBADF;
3409 entry = hci_white_list_lookup(hdev, bdaddr, type);
3410 if (!entry)
3411 return -ENOENT;
3413 list_del(&entry->list);
3414 kfree(entry);
3416 return 0;
3419 /* This function requires the caller holds hdev->lock */
3420 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3421 bdaddr_t *addr, u8 addr_type)
3423 struct hci_conn_params *params;
3425 /* The conn params list only contains identity addresses */
3426 if (!hci_is_identity_address(addr, addr_type))
3427 return NULL;
3429 list_for_each_entry(params, &hdev->le_conn_params, list) {
3430 if (bacmp(&params->addr, addr) == 0 &&
3431 params->addr_type == addr_type) {
3432 return params;
3436 return NULL;
3439 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3441 struct hci_conn *conn;
3443 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3444 if (!conn)
3445 return false;
3447 if (conn->dst_type != type)
3448 return false;
3450 if (conn->state != BT_CONNECTED)
3451 return false;
3453 return true;
3456 /* This function requires the caller holds hdev->lock */
3457 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3458 bdaddr_t *addr, u8 addr_type)
3460 struct hci_conn_params *param;
3462 /* The list only contains identity addresses */
3463 if (!hci_is_identity_address(addr, addr_type))
3464 return NULL;
3466 list_for_each_entry(param, list, action) {
3467 if (bacmp(&param->addr, addr) == 0 &&
3468 param->addr_type == addr_type)
3469 return param;
3472 return NULL;
3475 /* This function requires the caller holds hdev->lock */
3476 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3477 bdaddr_t *addr, u8 addr_type)
3479 struct hci_conn_params *params;
3481 if (!hci_is_identity_address(addr, addr_type))
3482 return NULL;
3484 params = hci_conn_params_lookup(hdev, addr, addr_type);
3485 if (params)
3486 return params;
3488 params = kzalloc(sizeof(*params), GFP_KERNEL);
3489 if (!params) {
3490 BT_ERR("Out of memory");
3491 return NULL;
3494 bacpy(&params->addr, addr);
3495 params->addr_type = addr_type;
3497 list_add(&params->list, &hdev->le_conn_params);
3498 INIT_LIST_HEAD(&params->action);
3500 params->conn_min_interval = hdev->le_conn_min_interval;
3501 params->conn_max_interval = hdev->le_conn_max_interval;
3502 params->conn_latency = hdev->le_conn_latency;
3503 params->supervision_timeout = hdev->le_supv_timeout;
3504 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3506 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3508 return params;
3511 /* This function requires the caller holds hdev->lock */
3512 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3513 u8 auto_connect)
3515 struct hci_conn_params *params;
3517 params = hci_conn_params_add(hdev, addr, addr_type);
3518 if (!params)
3519 return -EIO;
3521 if (params->auto_connect == auto_connect)
3522 return 0;
3524 list_del_init(&params->action);
3526 switch (auto_connect) {
3527 case HCI_AUTO_CONN_DISABLED:
3528 case HCI_AUTO_CONN_LINK_LOSS:
3529 hci_update_background_scan(hdev);
3530 break;
3531 case HCI_AUTO_CONN_REPORT:
3532 list_add(&params->action, &hdev->pend_le_reports);
3533 hci_update_background_scan(hdev);
3534 break;
3535 case HCI_AUTO_CONN_ALWAYS:
3536 if (!is_connected(hdev, addr, addr_type)) {
3537 list_add(&params->action, &hdev->pend_le_conns);
3538 hci_update_background_scan(hdev);
3540 break;
3543 params->auto_connect = auto_connect;
3545 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3546 auto_connect);
3548 return 0;
3551 /* This function requires the caller holds hdev->lock */
3552 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3554 struct hci_conn_params *params;
3556 params = hci_conn_params_lookup(hdev, addr, addr_type);
3557 if (!params)
3558 return;
3560 list_del(&params->action);
3561 list_del(&params->list);
3562 kfree(params);
3564 hci_update_background_scan(hdev);
3566 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3569 /* This function requires the caller holds hdev->lock */
3570 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3572 struct hci_conn_params *params, *tmp;
3574 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3575 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3576 continue;
3577 list_del(&params->list);
3578 kfree(params);
3581 BT_DBG("All LE disabled connection parameters were removed");
3584 /* This function requires the caller holds hdev->lock */
3585 void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3587 struct hci_conn_params *params, *tmp;
3589 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3590 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3591 continue;
3592 list_del(&params->action);
3593 list_del(&params->list);
3594 kfree(params);
3597 hci_update_background_scan(hdev);
3599 BT_DBG("All enabled LE connection parameters were removed");
3602 /* This function requires the caller holds hdev->lock */
3603 void hci_conn_params_clear_all(struct hci_dev *hdev)
3605 struct hci_conn_params *params, *tmp;
3607 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3608 list_del(&params->action);
3609 list_del(&params->list);
3610 kfree(params);
3613 hci_update_background_scan(hdev);
3615 BT_DBG("All LE connection parameters were removed");
3618 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3620 if (status) {
3621 BT_ERR("Failed to start inquiry: status %d", status);
3623 hci_dev_lock(hdev);
3624 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3625 hci_dev_unlock(hdev);
3626 return;
3630 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3632 /* General inquiry access code (GIAC) */
3633 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3634 struct hci_request req;
3635 struct hci_cp_inquiry cp;
3636 int err;
3638 if (status) {
3639 BT_ERR("Failed to disable LE scanning: status %d", status);
3640 return;
3643 switch (hdev->discovery.type) {
3644 case DISCOV_TYPE_LE:
3645 hci_dev_lock(hdev);
3646 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3647 hci_dev_unlock(hdev);
3648 break;
3650 case DISCOV_TYPE_INTERLEAVED:
3651 hci_req_init(&req, hdev);
3653 memset(&cp, 0, sizeof(cp));
3654 memcpy(&cp.lap, lap, sizeof(cp.lap));
3655 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3656 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3658 hci_dev_lock(hdev);
3660 hci_inquiry_cache_flush(hdev);
3662 err = hci_req_run(&req, inquiry_complete);
3663 if (err) {
3664 BT_ERR("Inquiry request failed: err %d", err);
3665 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3668 hci_dev_unlock(hdev);
3669 break;
3673 static void le_scan_disable_work(struct work_struct *work)
3675 struct hci_dev *hdev = container_of(work, struct hci_dev,
3676 le_scan_disable.work);
3677 struct hci_request req;
3678 int err;
3680 BT_DBG("%s", hdev->name);
3682 hci_req_init(&req, hdev);
3684 hci_req_add_le_scan_disable(&req);
3686 err = hci_req_run(&req, le_scan_disable_work_complete);
3687 if (err)
3688 BT_ERR("Disable LE scanning request failed: err %d", err);
3691 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3693 struct hci_dev *hdev = req->hdev;
3695 /* If we're advertising or initiating an LE connection we can't
3696 * go ahead and change the random address at this time. This is
3697 * because the eventual initiator address used for the
3698 * subsequently created connection will be undefined (some
3699 * controllers use the new address and others the one we had
3700 * when the operation started).
3702 * In this kind of scenario skip the update and let the random
3703 * address be updated at the next cycle.
3705 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3706 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3707 BT_DBG("Deferring random address update");
3708 return;
3711 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
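/* Select the own address type for an LE request and, when needed, add
 * an HCI command to update the random address: a resolvable private
 * address when privacy is enabled, an unresolvable address when
 * privacy is required without an RPA, the static address when forced
 * or when no public address exists, and the public address otherwise.
 */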
3714 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3715 u8 *own_addr_type)
3717 struct hci_dev *hdev = req->hdev;
3718 int err;
3720 /* If privacy is enabled use a resolvable private address. If
3721 * current RPA has expired or there is something other than
3722 * the current RPA in use, then generate a new one.
3724 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3725 int to;
3727 *own_addr_type = ADDR_LE_DEV_RANDOM;
3729 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3730 !bacmp(&hdev->random_addr, &hdev->rpa))
3731 return 0;
3733 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3734 if (err < 0) {
3735 BT_ERR("%s failed to generate new RPA", hdev->name);
3736 return err;
3739 set_random_addr(req, &hdev->rpa);
3741 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3742 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3744 return 0;
3747 /* In case of required privacy without resolvable private address,
3748 * use an unresolvable private address. This is useful for active
3749 * scanning and non-connectable advertising.
3751 if (require_privacy) {
3752 bdaddr_t urpa;
3754 get_random_bytes(&urpa, 6);
3755 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3757 *own_addr_type = ADDR_LE_DEV_RANDOM;
3758 set_random_addr(req, &urpa);
3759 return 0;
3762 /* If forcing static address is in use or there is no public
3763 * address, use the static address as the random address (but skip
3764 * the HCI command if the current random address is already the
3765 * static one).
3767 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3768 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3769 *own_addr_type = ADDR_LE_DEV_RANDOM;
3770 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3771 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3772 &hdev->static_addr);
3773 return 0;
3776 /* Neither privacy nor static address is being used so use a
3777 * public address.
3779 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3781 return 0;
3784 /* Copy the Identity Address of the controller.
3786 * If the controller has a public BD_ADDR, then by default use that one.
3787 * If this is a LE only controller without a public address, default to
3788 * the static random address.
3790 * For debugging purposes it is possible to force controllers with a
3791 * public address to use the static random address instead.
3793 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3794 u8 *bdaddr_type)
3796 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3797 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3798 bacpy(bdaddr, &hdev->static_addr);
3799 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3800 } else {
3801 bacpy(bdaddr, &hdev->bdaddr);
3802 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3806 /* Alloc HCI device */
3807 struct hci_dev *hci_alloc_dev(void)
3809 struct hci_dev *hdev;
3811 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3812 if (!hdev)
3813 return NULL;
3815 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3816 hdev->esco_type = (ESCO_HV1);
3817 hdev->link_mode = (HCI_LM_ACCEPT);
3818 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3819 hdev->io_capability = 0x03; /* No Input No Output */
3820 hdev->manufacturer = 0xffff; /* Default to internal use */
3821 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3822 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3824 hdev->sniff_max_interval = 800;
3825 hdev->sniff_min_interval = 80;
3827 hdev->le_adv_channel_map = 0x07;
3828 hdev->le_scan_interval = 0x0060;
3829 hdev->le_scan_window = 0x0030;
3830 hdev->le_conn_min_interval = 0x0028;
3831 hdev->le_conn_max_interval = 0x0038;
3832 hdev->le_conn_latency = 0x0000;
3833 hdev->le_supv_timeout = 0x002a;
3835 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3836 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3837 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3838 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3840 mutex_init(&hdev->lock);
3841 mutex_init(&hdev->req_lock);
3843 INIT_LIST_HEAD(&hdev->mgmt_pending);
3844 INIT_LIST_HEAD(&hdev->blacklist);
3845 INIT_LIST_HEAD(&hdev->uuids);
3846 INIT_LIST_HEAD(&hdev->link_keys);
3847 INIT_LIST_HEAD(&hdev->long_term_keys);
3848 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3849 INIT_LIST_HEAD(&hdev->remote_oob_data);
3850 INIT_LIST_HEAD(&hdev->le_white_list);
3851 INIT_LIST_HEAD(&hdev->le_conn_params);
3852 INIT_LIST_HEAD(&hdev->pend_le_conns);
3853 INIT_LIST_HEAD(&hdev->pend_le_reports);
3854 INIT_LIST_HEAD(&hdev->conn_hash.list);
3856 INIT_WORK(&hdev->rx_work, hci_rx_work);
3857 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3858 INIT_WORK(&hdev->tx_work, hci_tx_work);
3859 INIT_WORK(&hdev->power_on, hci_power_on);
3861 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3862 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3863 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3865 skb_queue_head_init(&hdev->rx_q);
3866 skb_queue_head_init(&hdev->cmd_q);
3867 skb_queue_head_init(&hdev->raw_q);
3869 init_waitqueue_head(&hdev->req_wait_q);
3871 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3873 hci_init_sysfs(hdev);
3874 discovery_init(hdev);
3876 return hdev;
3878 EXPORT_SYMBOL(hci_alloc_dev);
3880 /* Free HCI device */
3881 void hci_free_dev(struct hci_dev *hdev)
3883 /* will free via device release */
3884 put_device(&hdev->dev);
3886 EXPORT_SYMBOL(hci_free_dev);
3888 /* Register HCI device */
3889 int hci_register_dev(struct hci_dev *hdev)
3891 int id, error;
3893 if (!hdev->open || !hdev->close)
3894 return -EINVAL;
3896 /* Do not allow HCI_AMP devices to register at index 0,
3897 * so the index can be used as the AMP controller ID.
3899 switch (hdev->dev_type) {
3900 case HCI_BREDR:
3901 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3902 break;
3903 case HCI_AMP:
3904 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3905 break;
3906 default:
3907 return -EINVAL;
3910 if (id < 0)
3911 return id;
3913 sprintf(hdev->name, "hci%d", id);
3914 hdev->id = id;
3916 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3918 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3919 WQ_MEM_RECLAIM, 1, hdev->name);
3920 if (!hdev->workqueue) {
3921 error = -ENOMEM;
3922 goto err;
3925 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3926 WQ_MEM_RECLAIM, 1, hdev->name);
3927 if (!hdev->req_workqueue) {
3928 destroy_workqueue(hdev->workqueue);
3929 error = -ENOMEM;
3930 goto err;
3933 if (!IS_ERR_OR_NULL(bt_debugfs))
3934 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3936 dev_set_name(&hdev->dev, "%s", hdev->name);
3938 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3939 CRYPTO_ALG_ASYNC);
3940 if (IS_ERR(hdev->tfm_aes)) {
3941 BT_ERR("Unable to create crypto context");
3942 error = PTR_ERR(hdev->tfm_aes);
3943 hdev->tfm_aes = NULL;
3944 goto err_wqueue;
3947 error = device_add(&hdev->dev);
3948 if (error < 0)
3949 goto err_tfm;
3951 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3952 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3953 hdev);
3954 if (hdev->rfkill) {
3955 if (rfkill_register(hdev->rfkill) < 0) {
3956 rfkill_destroy(hdev->rfkill);
3957 hdev->rfkill = NULL;
3961 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3962 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3964 set_bit(HCI_SETUP, &hdev->dev_flags);
3965 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3967 if (hdev->dev_type == HCI_BREDR) {
3968 /* Assume BR/EDR support until proven otherwise (such as
3969 * through reading supported features during init).
3971 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3974 write_lock(&hci_dev_list_lock);
3975 list_add(&hdev->list, &hci_dev_list);
3976 write_unlock(&hci_dev_list_lock);
3978 /* Devices that are marked for raw-only usage are unconfigured
3979 * and should not be included in normal operation.
3981 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3982 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3984 hci_notify(hdev, HCI_DEV_REG);
3985 hci_dev_hold(hdev);
3987 queue_work(hdev->req_workqueue, &hdev->power_on);
3989 return id;
3991 err_tfm:
3992 crypto_free_blkcipher(hdev->tfm_aes);
3993 err_wqueue:
3994 destroy_workqueue(hdev->workqueue);
3995 destroy_workqueue(hdev->req_workqueue);
3996 err:
3997 ida_simple_remove(&hci_index_ida, hdev->id);
3999 return error;
4001 EXPORT_SYMBOL(hci_register_dev);
4003 /* Unregister HCI device */
4004 void hci_unregister_dev(struct hci_dev *hdev)
4006 int i, id;
4008 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4010 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4012 id = hdev->id;
4014 write_lock(&hci_dev_list_lock);
4015 list_del(&hdev->list);
4016 write_unlock(&hci_dev_list_lock);
4018 hci_dev_do_close(hdev);
4020 for (i = 0; i < NUM_REASSEMBLY; i++)
4021 kfree_skb(hdev->reassembly[i]);
4023 cancel_work_sync(&hdev->power_on);
4025 if (!test_bit(HCI_INIT, &hdev->flags) &&
4026 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4027 hci_dev_lock(hdev);
4028 mgmt_index_removed(hdev);
4029 hci_dev_unlock(hdev);
4032 /* mgmt_index_removed should take care of emptying the
4033 * pending list */
4034 BUG_ON(!list_empty(&hdev->mgmt_pending));
4036 hci_notify(hdev, HCI_DEV_UNREG);
4038 if (hdev->rfkill) {
4039 rfkill_unregister(hdev->rfkill);
4040 rfkill_destroy(hdev->rfkill);
4043 if (hdev->tfm_aes)
4044 crypto_free_blkcipher(hdev->tfm_aes);
4046 device_del(&hdev->dev);
4048 debugfs_remove_recursive(hdev->debugfs);
4050 destroy_workqueue(hdev->workqueue);
4051 destroy_workqueue(hdev->req_workqueue);
4053 hci_dev_lock(hdev);
4054 hci_blacklist_clear(hdev);
4055 hci_uuids_clear(hdev);
4056 hci_link_keys_clear(hdev);
4057 hci_smp_ltks_clear(hdev);
4058 hci_smp_irks_clear(hdev);
4059 hci_remote_oob_data_clear(hdev);
4060 hci_white_list_clear(hdev);
4061 hci_conn_params_clear_all(hdev);
4062 hci_dev_unlock(hdev);
4064 hci_dev_put(hdev);
4066 ida_simple_remove(&hci_index_ida, id);
4068 EXPORT_SYMBOL(hci_unregister_dev);
4070 /* Suspend HCI device */
4071 int hci_suspend_dev(struct hci_dev *hdev)
4073 hci_notify(hdev, HCI_DEV_SUSPEND);
4074 return 0;
4076 EXPORT_SYMBOL(hci_suspend_dev);
4078 /* Resume HCI device */
4079 int hci_resume_dev(struct hci_dev *hdev)
4081 hci_notify(hdev, HCI_DEV_RESUME);
4082 return 0;
4084 EXPORT_SYMBOL(hci_resume_dev);
4086 /* Receive frame from HCI drivers */
4087 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4089 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4090 && !test_bit(HCI_INIT, &hdev->flags))) {
4091 kfree_skb(skb);
4092 return -ENXIO;
4095 /* Incoming skb */
4096 bt_cb(skb)->incoming = 1;
4098 /* Time stamp */
4099 __net_timestamp(skb);
4101 skb_queue_tail(&hdev->rx_q, skb);
4102 queue_work(hdev->workqueue, &hdev->rx_work);
4104 return 0;
4106 EXPORT_SYMBOL(hci_recv_frame);
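/* Accumulate bytes of a single HCI packet of the given type into
 * hdev->reassembly[index]. Once the packet header is complete, the
 * expected payload length is taken from it; a fully reassembled frame
 * is handed to hci_recv_frame(). Returns the number of input bytes
 * that were not consumed, or a negative error.
 */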
4108 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4109 int count, __u8 index)
4111 int len = 0;
4112 int hlen = 0;
4113 int remain = count;
4114 struct sk_buff *skb;
4115 struct bt_skb_cb *scb;
4117 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4118 index >= NUM_REASSEMBLY)
4119 return -EILSEQ;
4121 skb = hdev->reassembly[index];
4123 if (!skb) {
4124 switch (type) {
4125 case HCI_ACLDATA_PKT:
4126 len = HCI_MAX_FRAME_SIZE;
4127 hlen = HCI_ACL_HDR_SIZE;
4128 break;
4129 case HCI_EVENT_PKT:
4130 len = HCI_MAX_EVENT_SIZE;
4131 hlen = HCI_EVENT_HDR_SIZE;
4132 break;
4133 case HCI_SCODATA_PKT:
4134 len = HCI_MAX_SCO_SIZE;
4135 hlen = HCI_SCO_HDR_SIZE;
4136 break;
4139 skb = bt_skb_alloc(len, GFP_ATOMIC);
4140 if (!skb)
4141 return -ENOMEM;
4143 scb = (void *) skb->cb;
4144 scb->expect = hlen;
4145 scb->pkt_type = type;
4147 hdev->reassembly[index] = skb;
4150 while (count) {
4151 scb = (void *) skb->cb;
4152 len = min_t(uint, scb->expect, count);
4154 memcpy(skb_put(skb, len), data, len);
4156 count -= len;
4157 data += len;
4158 scb->expect -= len;
4159 remain = count;
4161 switch (type) {
4162 case HCI_EVENT_PKT:
4163 if (skb->len == HCI_EVENT_HDR_SIZE) {
4164 struct hci_event_hdr *h = hci_event_hdr(skb);
4165 scb->expect = h->plen;
4167 if (skb_tailroom(skb) < scb->expect) {
4168 kfree_skb(skb);
4169 hdev->reassembly[index] = NULL;
4170 return -ENOMEM;
4173 break;
4175 case HCI_ACLDATA_PKT:
4176 if (skb->len == HCI_ACL_HDR_SIZE) {
4177 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4178 scb->expect = __le16_to_cpu(h->dlen);
4180 if (skb_tailroom(skb) < scb->expect) {
4181 kfree_skb(skb);
4182 hdev->reassembly[index] = NULL;
4183 return -ENOMEM;
4186 break;
4188 case HCI_SCODATA_PKT:
4189 if (skb->len == HCI_SCO_HDR_SIZE) {
4190 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4191 scb->expect = h->dlen;
4193 if (skb_tailroom(skb) < scb->expect) {
4194 kfree_skb(skb);
4195 hdev->reassembly[index] = NULL;
4196 return -ENOMEM;
4199 break;
4202 if (scb->expect == 0) {
4203 /* Complete frame */
4205 bt_cb(skb)->pkt_type = type;
4206 hci_recv_frame(hdev, skb);
4208 hdev->reassembly[index] = NULL;
4209 return remain;
4213 return remain;
4216 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4218 int rem = 0;
4220 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4221 return -EILSEQ;
4223 while (count) {
4224 rem = hci_reassembly(hdev, type, data, count, type - 1);
4225 if (rem < 0)
4226 return rem;
4228 data += (count - rem);
4229 count = rem;
4232 return rem;
4234 EXPORT_SYMBOL(hci_recv_fragment);
4236 #define STREAM_REASSEMBLY 0
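/* Reassemble HCI packets from a raw byte stream in which every packet
 * is prefixed by a one-byte packet type indicator, as produced by
 * UART style transports. Uses the dedicated STREAM_REASSEMBLY slot.
 */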
4238 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4240 int type;
4241 int rem = 0;
4243 while (count) {
4244 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4246 if (!skb) {
4247 struct { char type; } *pkt;
4249 /* Start of the frame */
4250 pkt = data;
4251 type = pkt->type;
4253 data++;
4254 count--;
4255 } else
4256 type = bt_cb(skb)->pkt_type;
4258 rem = hci_reassembly(hdev, type, data, count,
4259 STREAM_REASSEMBLY);
4260 if (rem < 0)
4261 return rem;
4263 data += (count - rem);
4264 count = rem;
4267 return rem;
4269 EXPORT_SYMBOL(hci_recv_stream_fragment);
4271 /* ---- Interface to upper protocols ---- */
4273 int hci_register_cb(struct hci_cb *cb)
4275 BT_DBG("%p name %s", cb, cb->name);
4277 write_lock(&hci_cb_list_lock);
4278 list_add(&cb->list, &hci_cb_list);
4279 write_unlock(&hci_cb_list_lock);
4281 return 0;
4283 EXPORT_SYMBOL(hci_register_cb);
4285 int hci_unregister_cb(struct hci_cb *cb)
4287 BT_DBG("%p name %s", cb, cb->name);
4289 write_lock(&hci_cb_list_lock);
4290 list_del(&cb->list);
4291 write_unlock(&hci_cb_list_lock);
4293 return 0;
4295 EXPORT_SYMBOL(hci_unregister_cb);
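/* Hand one frame to the driver: timestamp it, send a copy to the
 * monitor socket (and to raw sockets when in promiscuous mode) and
 * then call the driver send() callback.
 */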
4297 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4299 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4301 /* Time stamp */
4302 __net_timestamp(skb);
4304 /* Send copy to monitor */
4305 hci_send_to_monitor(hdev, skb);
4307 if (atomic_read(&hdev->promisc)) {
4308 /* Send copy to the sockets */
4309 hci_send_to_sock(hdev, skb);
4312 /* Get rid of skb owner, prior to sending to the driver. */
4313 skb_orphan(skb);
4315 if (hdev->send(hdev, skb) < 0)
4316 BT_ERR("%s sending frame failed", hdev->name);
4319 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4321 skb_queue_head_init(&req->cmd_q);
4322 req->hdev = hdev;
4323 req->err = 0;
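/* Run a built HCI request: attach the completion callback to the last
 * queued command, splice all commands onto hdev->cmd_q and schedule
 * the command work. Returns -ENODATA for empty requests or the
 * request build error, if any.
 */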
4326 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4328 struct hci_dev *hdev = req->hdev;
4329 struct sk_buff *skb;
4330 unsigned long flags;
4332 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4334 /* If an error occurred during request building, remove all HCI
4335 * commands queued on the HCI request queue.
4337 if (req->err) {
4338 skb_queue_purge(&req->cmd_q);
4339 return req->err;
4342 /* Do not allow empty requests */
4343 if (skb_queue_empty(&req->cmd_q))
4344 return -ENODATA;
4346 skb = skb_peek_tail(&req->cmd_q);
4347 bt_cb(skb)->req.complete = complete;
4349 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4350 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4351 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4353 queue_work(hdev->workqueue, &hdev->cmd_work);
4355 return 0;
4358 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4359 u32 plen, const void *param)
4361 int len = HCI_COMMAND_HDR_SIZE + plen;
4362 struct hci_command_hdr *hdr;
4363 struct sk_buff *skb;
4365 skb = bt_skb_alloc(len, GFP_ATOMIC);
4366 if (!skb)
4367 return NULL;
4369 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4370 hdr->opcode = cpu_to_le16(opcode);
4371 hdr->plen = plen;
4373 if (plen)
4374 memcpy(skb_put(skb, plen), param, plen);
4376 BT_DBG("skb len %d", skb->len);
4378 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4380 return skb;
4383 /* Send HCI command */
4384 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4385 const void *param)
4387 struct sk_buff *skb;
4389 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4391 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4392 if (!skb) {
4393 BT_ERR("%s no memory for command", hdev->name);
4394 return -ENOMEM;
4397 /* Stand-alone HCI commands must be flagged as
4398 * single-command requests.
4400 bt_cb(skb)->req.start = true;
4402 skb_queue_tail(&hdev->cmd_q, skb);
4403 queue_work(hdev->workqueue, &hdev->cmd_work);
4405 return 0;
4408 /* Queue a command to an asynchronous HCI request */
4409 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4410 const void *param, u8 event)
4412 struct hci_dev *hdev = req->hdev;
4413 struct sk_buff *skb;
4415 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4417 /* If an error occurred during request building, there is no point in
4418 * queueing the HCI command. We can simply return.
4420 if (req->err)
4421 return;
4423 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4424 if (!skb) {
4425 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4426 hdev->name, opcode);
4427 req->err = -ENOMEM;
4428 return;
4431 if (skb_queue_empty(&req->cmd_q))
4432 bt_cb(skb)->req.start = true;
4434 bt_cb(skb)->req.event = event;
4436 skb_queue_tail(&req->cmd_q, skb);
4439 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4440 const void *param)
4442 hci_req_add_ev(req, opcode, plen, param, 0);
4445 /* Get data from the previously sent command */
4446 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4448 struct hci_command_hdr *hdr;
4450 if (!hdev->sent_cmd)
4451 return NULL;
4453 hdr = (void *) hdev->sent_cmd->data;
4455 if (hdr->opcode != cpu_to_le16(opcode))
4456 return NULL;
4458 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4460 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4463 /* Send ACL data */
4464 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4466 struct hci_acl_hdr *hdr;
4467 int len = skb->len;
4469 skb_push(skb, HCI_ACL_HDR_SIZE);
4470 skb_reset_transport_header(skb);
4471 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4472 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4473 hdr->dlen = cpu_to_le16(len);
4476 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4477 struct sk_buff *skb, __u16 flags)
4479 struct hci_conn *conn = chan->conn;
4480 struct hci_dev *hdev = conn->hdev;
4481 struct sk_buff *list;
4483 skb->len = skb_headlen(skb);
4484 skb->data_len = 0;
4486 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4488 switch (hdev->dev_type) {
4489 case HCI_BREDR:
4490 hci_add_acl_hdr(skb, conn->handle, flags);
4491 break;
4492 case HCI_AMP:
4493 hci_add_acl_hdr(skb, chan->handle, flags);
4494 break;
4495 default:
4496 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4497 return;
4500 list = skb_shinfo(skb)->frag_list;
4501 if (!list) {
4502 /* Non fragmented */
4503 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4505 skb_queue_tail(queue, skb);
4506 } else {
4507 /* Fragmented */
4508 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4510 skb_shinfo(skb)->frag_list = NULL;
4512 /* Queue all fragments atomically */
4513 spin_lock(&queue->lock);
4515 __skb_queue_tail(queue, skb);
4517 flags &= ~ACL_START;
4518 flags |= ACL_CONT;
4519 do {
4520 skb = list; list = list->next;
4522 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4523 hci_add_acl_hdr(skb, conn->handle, flags);
4525 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4527 __skb_queue_tail(queue, skb);
4528 } while (list);
4530 spin_unlock(&queue->lock);
4534 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4536 struct hci_dev *hdev = chan->conn->hdev;
4538 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4540 hci_queue_acl(chan, &chan->data_q, skb, flags);
4542 queue_work(hdev->workqueue, &hdev->tx_work);
4545 /* Send SCO data */
4546 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4548 struct hci_dev *hdev = conn->hdev;
4549 struct hci_sco_hdr hdr;
4551 BT_DBG("%s len %d", hdev->name, skb->len);
4553 hdr.handle = cpu_to_le16(conn->handle);
4554 hdr.dlen = skb->len;
4556 skb_push(skb, HCI_SCO_HDR_SIZE);
4557 skb_reset_transport_header(skb);
4558 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4560 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4562 skb_queue_tail(&conn->data_q, skb);
4563 queue_work(hdev->workqueue, &hdev->tx_work);
4566 /* ---- HCI TX task (outgoing data) ---- */
4568 /* HCI Connection scheduler */
4569 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4570 int *quote)
4572 struct hci_conn_hash *h = &hdev->conn_hash;
4573 struct hci_conn *conn = NULL, *c;
4574 unsigned int num = 0, min = ~0;
4576 /* We don't have to lock device here. Connections are always
4577 * added and removed with TX task disabled. */
4579 rcu_read_lock();
4581 list_for_each_entry_rcu(c, &h->list, list) {
4582 if (c->type != type || skb_queue_empty(&c->data_q))
4583 continue;
4585 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4586 continue;
4588 num++;
4590 if (c->sent < min) {
4591 min = c->sent;
4592 conn = c;
4595 if (hci_conn_num(hdev, type) == num)
4596 break;
4599 rcu_read_unlock();
4601 if (conn) {
4602 int cnt, q;
4604 switch (conn->type) {
4605 case ACL_LINK:
4606 cnt = hdev->acl_cnt;
4607 break;
4608 case SCO_LINK:
4609 case ESCO_LINK:
4610 cnt = hdev->sco_cnt;
4611 break;
4612 case LE_LINK:
4613 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4614 break;
4615 default:
4616 cnt = 0;
4617 BT_ERR("Unknown link type");
4620 q = cnt / num;
4621 *quote = q ? q : 1;
4622 } else
4623 *quote = 0;
4625 BT_DBG("conn %p quote %d", conn, *quote);
4626 return conn;
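/* Worked example of the quota computed above (hypothetical numbers): with
 * hdev->acl_cnt == 5 free ACL buffers and num == 2 ACL connections that
 * have queued data, the scheduler picks the connection with the fewest
 * outstanding packets and allows it q = 5 / 2 = 2 packets this round; when
 * cnt / num rounds down to zero the quote is clamped to 1 so the selected
 * connection can still make progress.
 */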
4629 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4631 struct hci_conn_hash *h = &hdev->conn_hash;
4632 struct hci_conn *c;
4634 BT_ERR("%s link tx timeout", hdev->name);
4636 rcu_read_lock();
4638 /* Kill stalled connections */
4639 list_for_each_entry_rcu(c, &h->list, list) {
4640 if (c->type == type && c->sent) {
4641 BT_ERR("%s killing stalled connection %pMR",
4642 hdev->name, &c->dst);
4643 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4647 rcu_read_unlock();
4650 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4651 int *quote)
4653 struct hci_conn_hash *h = &hdev->conn_hash;
4654 struct hci_chan *chan = NULL;
4655 unsigned int num = 0, min = ~0, cur_prio = 0;
4656 struct hci_conn *conn;
4657 int cnt, q, conn_num = 0;
4659 BT_DBG("%s", hdev->name);
4661 rcu_read_lock();
4663 list_for_each_entry_rcu(conn, &h->list, list) {
4664 struct hci_chan *tmp;
4666 if (conn->type != type)
4667 continue;
4669 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4670 continue;
4672 conn_num++;
4674 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4675 struct sk_buff *skb;
4677 if (skb_queue_empty(&tmp->data_q))
4678 continue;
4680 skb = skb_peek(&tmp->data_q);
4681 if (skb->priority < cur_prio)
4682 continue;
4684 if (skb->priority > cur_prio) {
4685 num = 0;
4686 min = ~0;
4687 cur_prio = skb->priority;
4690 num++;
4692 if (conn->sent < min) {
4693 min = conn->sent;
4694 chan = tmp;
4698 if (hci_conn_num(hdev, type) == conn_num)
4699 break;
4702 rcu_read_unlock();
4704 if (!chan)
4705 return NULL;
4707 switch (chan->conn->type) {
4708 case ACL_LINK:
4709 cnt = hdev->acl_cnt;
4710 break;
4711 case AMP_LINK:
4712 cnt = hdev->block_cnt;
4713 break;
4714 case SCO_LINK:
4715 case ESCO_LINK:
4716 cnt = hdev->sco_cnt;
4717 break;
4718 case LE_LINK:
4719 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4720 break;
4721 default:
4722 cnt = 0;
4723 BT_ERR("Unknown link type");
4726 q = cnt / num;
4727 *quote = q ? q : 1;
4728 BT_DBG("chan %p quote %d", chan, *quote);
4729 return chan;
4732 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4734 struct hci_conn_hash *h = &hdev->conn_hash;
4735 struct hci_conn *conn;
4736 int num = 0;
4738 BT_DBG("%s", hdev->name);
4740 rcu_read_lock();
4742 list_for_each_entry_rcu(conn, &h->list, list) {
4743 struct hci_chan *chan;
4745 if (conn->type != type)
4746 continue;
4748 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4749 continue;
4751 num++;
4753 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4754 struct sk_buff *skb;
4756 if (chan->sent) {
4757 chan->sent = 0;
4758 continue;
4761 if (skb_queue_empty(&chan->data_q))
4762 continue;
4764 skb = skb_peek(&chan->data_q);
4765 if (skb->priority >= HCI_PRIO_MAX - 1)
4766 continue;
4768 skb->priority = HCI_PRIO_MAX - 1;
4770 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4771 skb->priority);
4774 if (hci_conn_num(hdev, type) == num)
4775 break;
4778 rcu_read_unlock();
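/* Note: this promotion is an anti-starvation measure.  A channel whose
 * chan->sent is non-zero just transmitted, so its counter is simply reset;
 * a channel that sent nothing in the last round has the priority of its
 * head skb bumped to HCI_PRIO_MAX - 1 so higher-priority traffic cannot
 * hold it off indefinitely.
 */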
4782 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4784 /* Calculate count of blocks used by this packet */
4785 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
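/* Worked example (hypothetical block_len of 339 bytes): a 1361-byte skb
 * carries 1357 bytes of payload after the 4-byte ACL header, so
 * DIV_ROUND_UP(1357, 339) = 5 data blocks are charged against
 * hdev->block_cnt for that packet.
 */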
4788 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4790 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4791 /* ACL tx timeout must be longer than maximum
4792 * link supervision timeout (40.9 seconds) */
4793 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4794 HCI_ACL_TX_TIMEOUT))
4795 hci_link_tx_to(hdev, ACL_LINK);
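/* Note: HCI_ACL_TX_TIMEOUT is defined in hci_core.h; in mainline kernels of
 * this era it is 45 seconds, matching the HZ * 45 used for the LE timeout
 * below and satisfying the "longer than 40.9 seconds" requirement stated in
 * the comment above.
 */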
4799 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4801 unsigned int cnt = hdev->acl_cnt;
4802 struct hci_chan *chan;
4803 struct sk_buff *skb;
4804 int quote;
4806 __check_timeout(hdev, cnt);
4808 while (hdev->acl_cnt &&
4809 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4810 u32 priority = (skb_peek(&chan->data_q))->priority;
4811 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4812 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4813 skb->len, skb->priority);
4815 /* Stop if priority has changed */
4816 if (skb->priority < priority)
4817 break;
4819 skb = skb_dequeue(&chan->data_q);
4821 hci_conn_enter_active_mode(chan->conn,
4822 bt_cb(skb)->force_active);
4824 hci_send_frame(hdev, skb);
4825 hdev->acl_last_tx = jiffies;
4827 hdev->acl_cnt--;
4828 chan->sent++;
4829 chan->conn->sent++;
4833 if (cnt != hdev->acl_cnt)
4834 hci_prio_recalculate(hdev, ACL_LINK);
4837 static void hci_sched_acl_blk(struct hci_dev *hdev)
4839 unsigned int cnt = hdev->block_cnt;
4840 struct hci_chan *chan;
4841 struct sk_buff *skb;
4842 int quote;
4843 u8 type;
4845 __check_timeout(hdev, cnt);
4847 BT_DBG("%s", hdev->name);
4849 if (hdev->dev_type == HCI_AMP)
4850 type = AMP_LINK;
4851 else
4852 type = ACL_LINK;
4854 while (hdev->block_cnt > 0 &&
4855 (chan = hci_chan_sent(hdev, type, &quote))) {
4856 u32 priority = (skb_peek(&chan->data_q))->priority;
4857 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4858 int blocks;
4860 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4861 skb->len, skb->priority);
4863 /* Stop if priority has changed */
4864 if (skb->priority < priority)
4865 break;
4867 skb = skb_dequeue(&chan->data_q);
4869 blocks = __get_blocks(hdev, skb);
4870 if (blocks > hdev->block_cnt)
4871 return;
4873 hci_conn_enter_active_mode(chan->conn,
4874 bt_cb(skb)->force_active);
4876 hci_send_frame(hdev, skb);
4877 hdev->acl_last_tx = jiffies;
4879 hdev->block_cnt -= blocks;
4880 quote -= blocks;
4882 chan->sent += blocks;
4883 chan->conn->sent += blocks;
4887 if (cnt != hdev->block_cnt)
4888 hci_prio_recalculate(hdev, type);
4891 static void hci_sched_acl(struct hci_dev *hdev)
4893 BT_DBG("%s", hdev->name);
4895 /* Nothing to do if a BR/EDR controller has no ACL links */
4896 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4897 return;
4899 /* Nothing to do if an AMP controller has no AMP links */
4900 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4901 return;
4903 switch (hdev->flow_ctl_mode) {
4904 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4905 hci_sched_acl_pkt(hdev);
4906 break;
4908 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4909 hci_sched_acl_blk(hdev);
4910 break;
4914 /* Schedule SCO */
4915 static void hci_sched_sco(struct hci_dev *hdev)
4917 struct hci_conn *conn;
4918 struct sk_buff *skb;
4919 int quote;
4921 BT_DBG("%s", hdev->name);
4923 if (!hci_conn_num(hdev, SCO_LINK))
4924 return;
4926 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4927 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4928 BT_DBG("skb %p len %d", skb, skb->len);
4929 hci_send_frame(hdev, skb);
4931 conn->sent++;
4932 if (conn->sent == ~0)
4933 conn->sent = 0;
4938 static void hci_sched_esco(struct hci_dev *hdev)
4940 struct hci_conn *conn;
4941 struct sk_buff *skb;
4942 int quote;
4944 BT_DBG("%s", hdev->name);
4946 if (!hci_conn_num(hdev, ESCO_LINK))
4947 return;
4949 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4950 &quote))) {
4951 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4952 BT_DBG("skb %p len %d", skb, skb->len);
4953 hci_send_frame(hdev, skb);
4955 conn->sent++;
4956 if (conn->sent == ~0)
4957 conn->sent = 0;
4962 static void hci_sched_le(struct hci_dev *hdev)
4964 struct hci_chan *chan;
4965 struct sk_buff *skb;
4966 int quote, cnt, tmp;
4968 BT_DBG("%s", hdev->name);
4970 if (!hci_conn_num(hdev, LE_LINK))
4971 return;
4973 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4974 /* LE tx timeout must be longer than maximum
4975 * link supervision timeout (40.9 seconds) */
4976 if (!hdev->le_cnt && hdev->le_pkts &&
4977 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4978 hci_link_tx_to(hdev, LE_LINK);
4981 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4982 tmp = cnt;
4983 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4984 u32 priority = (skb_peek(&chan->data_q))->priority;
4985 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4986 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4987 skb->len, skb->priority);
4989 /* Stop if priority has changed */
4990 if (skb->priority < priority)
4991 break;
4993 skb = skb_dequeue(&chan->data_q);
4995 hci_send_frame(hdev, skb);
4996 hdev->le_last_tx = jiffies;
4998 cnt--;
4999 chan->sent++;
5000 chan->conn->sent++;
5004 if (hdev->le_pkts)
5005 hdev->le_cnt = cnt;
5006 else
5007 hdev->acl_cnt = cnt;
5009 if (cnt != tmp)
5010 hci_prio_recalculate(hdev, LE_LINK);
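/* Note: the cnt fallback above covers controllers that report no dedicated
 * LE buffer pool (hdev->le_pkts == 0); their LE traffic is charged against
 * the shared ACL credits instead, which is why either hdev->le_cnt or
 * hdev->acl_cnt is written back once scheduling finishes.
 */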
5013 static void hci_tx_work(struct work_struct *work)
5015 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5016 struct sk_buff *skb;
5018 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5019 hdev->sco_cnt, hdev->le_cnt);
5021 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5022 /* Schedule queues and send stuff to HCI driver */
5023 hci_sched_acl(hdev);
5024 hci_sched_sco(hdev);
5025 hci_sched_esco(hdev);
5026 hci_sched_le(hdev);
5029 /* Send next queued raw (unknown type) packet */
5030 while ((skb = skb_dequeue(&hdev->raw_q)))
5031 hci_send_frame(hdev, skb);
5034 /* ----- HCI RX task (incoming data processing) ----- */
5036 /* ACL data packet */
5037 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5039 struct hci_acl_hdr *hdr = (void *) skb->data;
5040 struct hci_conn *conn;
5041 __u16 handle, flags;
5043 skb_pull(skb, HCI_ACL_HDR_SIZE);
5045 handle = __le16_to_cpu(hdr->handle);
5046 flags = hci_flags(handle);
5047 handle = hci_handle(handle);
5049 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5050 handle, flags);
5052 hdev->stat.acl_rx++;
5054 hci_dev_lock(hdev);
5055 conn = hci_conn_hash_lookup_handle(hdev, handle);
5056 hci_dev_unlock(hdev);
5058 if (conn) {
5059 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5061 /* Send to upper protocol */
5062 l2cap_recv_acldata(conn, skb, flags);
5063 return;
5064 } else {
5065 BT_ERR("%s ACL packet for unknown connection handle %d",
5066 hdev->name, handle);
5069 kfree_skb(skb);
5072 /* SCO data packet */
5073 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5075 struct hci_sco_hdr *hdr = (void *) skb->data;
5076 struct hci_conn *conn;
5077 __u16 handle;
5079 skb_pull(skb, HCI_SCO_HDR_SIZE);
5081 handle = __le16_to_cpu(hdr->handle);
5083 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5085 hdev->stat.sco_rx++;
5087 hci_dev_lock(hdev);
5088 conn = hci_conn_hash_lookup_handle(hdev, handle);
5089 hci_dev_unlock(hdev);
5091 if (conn) {
5092 /* Send to upper protocol */
5093 sco_recv_scodata(conn, skb);
5094 return;
5095 } else {
5096 BT_ERR("%s SCO packet for unknown connection handle %d",
5097 hdev->name, handle);
5100 kfree_skb(skb);
5103 static bool hci_req_is_complete(struct hci_dev *hdev)
5105 struct sk_buff *skb;
5107 skb = skb_peek(&hdev->cmd_q);
5108 if (!skb)
5109 return true;
5111 return bt_cb(skb)->req.start;
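/* Note: bt_cb(skb)->req.start is set by hci_req_add_ev() above for the
 * first command queued on an empty request, so finding a start-marked skb
 * (or nothing at all) at the head of hdev->cmd_q means no commands from the
 * currently executing request remain pending.
 */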
5114 static void hci_resend_last(struct hci_dev *hdev)
5116 struct hci_command_hdr *sent;
5117 struct sk_buff *skb;
5118 u16 opcode;
5120 if (!hdev->sent_cmd)
5121 return;
5123 sent = (void *) hdev->sent_cmd->data;
5124 opcode = __le16_to_cpu(sent->opcode);
5125 if (opcode == HCI_OP_RESET)
5126 return;
5128 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5129 if (!skb)
5130 return;
5132 skb_queue_head(&hdev->cmd_q, skb);
5133 queue_work(hdev->workqueue, &hdev->cmd_work);
5136 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5138 hci_req_complete_t req_complete = NULL;
5139 struct sk_buff *skb;
5140 unsigned long flags;
5142 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5144 /* If the completed command doesn't match the last one that was
5145  * sent, we need to do special handling of it.
5146  */
5147 if (!hci_sent_cmd_data(hdev, opcode)) {
5148 /* Some CSR-based controllers generate a spontaneous
5149  * reset complete event during init and any pending
5150  * command will never be completed. In such a case we
5151  * need to resend whatever was the last sent
5152  * command.
5153  */
5154 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5155 hci_resend_last(hdev);
5157 return;
5160 /* If the command succeeded and there are still more commands in
5161  * this request, the request is not yet complete.
5162  */
5163 if (!status && !hci_req_is_complete(hdev))
5164 return;
5166 /* If this was the last command in a request, the complete
5167  * callback would be found in hdev->sent_cmd instead of the
5168  * command queue (hdev->cmd_q).
5169  */
5170 if (hdev->sent_cmd) {
5171 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5173 if (req_complete) {
5174 /* We must set the complete callback to NULL to
5175 * avoid calling the callback more than once if
5176  * this function gets called again.
5177  */
5178 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5180 goto call_complete;
5184 /* Remove all pending commands belonging to this request */
5185 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5186 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5187 if (bt_cb(skb)->req.start) {
5188 __skb_queue_head(&hdev->cmd_q, skb);
5189 break;
5192 req_complete = bt_cb(skb)->req.complete;
5193 kfree_skb(skb);
5195 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5197 call_complete:
5198 if (req_complete)
5199 req_complete(hdev, status);
5202 static void hci_rx_work(struct work_struct *work)
5204 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5205 struct sk_buff *skb;
5207 BT_DBG("%s", hdev->name);
5209 while ((skb = skb_dequeue(&hdev->rx_q))) {
5210 /* Send copy to monitor */
5211 hci_send_to_monitor(hdev, skb);
5213 if (atomic_read(&hdev->promisc)) {
5214 /* Send copy to the sockets */
5215 hci_send_to_sock(hdev, skb);
5218 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5219 kfree_skb(skb);
5220 continue;
5223 if (test_bit(HCI_INIT, &hdev->flags)) {
5224 /* Don't process data packets in this state. */
5225 switch (bt_cb(skb)->pkt_type) {
5226 case HCI_ACLDATA_PKT:
5227 case HCI_SCODATA_PKT:
5228 kfree_skb(skb);
5229 continue;
5233 /* Process frame */
5234 switch (bt_cb(skb)->pkt_type) {
5235 case HCI_EVENT_PKT:
5236 BT_DBG("%s Event packet", hdev->name);
5237 hci_event_packet(hdev, skb);
5238 break;
5240 case HCI_ACLDATA_PKT:
5241 BT_DBG("%s ACL data packet", hdev->name);
5242 hci_acldata_packet(hdev, skb);
5243 break;
5245 case HCI_SCODATA_PKT:
5246 BT_DBG("%s SCO data packet", hdev->name);
5247 hci_scodata_packet(hdev, skb);
5248 break;
5250 default:
5251 kfree_skb(skb);
5252 break;
5257 static void hci_cmd_work(struct work_struct *work)
5259 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5260 struct sk_buff *skb;
5262 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5263 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5265 /* Send queued commands */
5266 if (atomic_read(&hdev->cmd_cnt)) {
5267 skb = skb_dequeue(&hdev->cmd_q);
5268 if (!skb)
5269 return;
5271 kfree_skb(hdev->sent_cmd);
5273 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5274 if (hdev->sent_cmd) {
5275 atomic_dec(&hdev->cmd_cnt);
5276 hci_send_frame(hdev, skb);
5277 if (test_bit(HCI_RESET, &hdev->flags))
5278 cancel_delayed_work(&hdev->cmd_timer);
5279 else
5280 schedule_delayed_work(&hdev->cmd_timer,
5281 HCI_CMD_TIMEOUT);
5282 } else {
5283 skb_queue_head(&hdev->cmd_q, skb);
5284 queue_work(hdev->workqueue, &hdev->cmd_work);
5289 void hci_req_add_le_scan_disable(struct hci_request *req)
5291 struct hci_cp_le_set_scan_enable cp;
5293 memset(&cp, 0, sizeof(cp));
5294 cp.enable = LE_SCAN_DISABLE;
5295 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5298 void hci_req_add_le_passive_scan(struct hci_request *req)
5300 struct hci_cp_le_set_scan_param param_cp;
5301 struct hci_cp_le_set_scan_enable enable_cp;
5302 struct hci_dev *hdev = req->hdev;
5303 u8 own_addr_type;
5305 /* Set require_privacy to false since no SCAN_REQ are sent
5306  * during passive scanning. Not using an unresolvable address
5307  * here is important so that peer devices using direct
5308  * advertising with our address will be correctly reported
5309  * by the controller.
5310  */
5311 if (hci_update_random_address(req, false, &own_addr_type))
5312 return;
5314 memset(&param_cp, 0, sizeof(param_cp));
5315 param_cp.type = LE_SCAN_PASSIVE;
5316 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5317 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5318 param_cp.own_address_type = own_addr_type;
5319 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5320 &param_cp);
5322 memset(&enable_cp, 0, sizeof(enable_cp));
5323 enable_cp.enable = LE_SCAN_ENABLE;
5324 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5325 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5326 &enable_cp);
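/* Note: hdev->le_scan_interval and hdev->le_scan_window are controller-wide
 * defaults initialized elsewhere in this file and are expressed in 0.625 ms
 * units, as defined for the LE Set Scan Parameters command; passive scanning
 * with duplicate filtering enabled keeps this background scan cheap.
 */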
5329 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5331 if (status)
5332 BT_DBG("HCI request failed to update background scanning: "
5333 "status 0x%2.2x", status);
5336 /* This function controls the background scanning based on hdev->pend_le_conns
5337  * list. If there are pending LE connections, we start the background scanning,
5338  * otherwise we stop it.
5339  *
5340  * This function requires that the caller holds hdev->lock.
5341  */
5342 void hci_update_background_scan(struct hci_dev *hdev)
5344 struct hci_request req;
5345 struct hci_conn *conn;
5346 int err;
5348 if (!test_bit(HCI_UP, &hdev->flags) ||
5349 test_bit(HCI_INIT, &hdev->flags) ||
5350 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5351 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5352 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5353 return;
5355 hci_req_init(&req, hdev);
5357 if (list_empty(&hdev->pend_le_conns) &&
5358 list_empty(&hdev->pend_le_reports)) {
5359 /* If there are no pending LE connections or devices
5360  * to be scanned for, we should stop the background
5361  * scanning.
5362  */
5364 /* If controller is not scanning we are done. */
5365 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5366 return;
5368 hci_req_add_le_scan_disable(&req);
5370 BT_DBG("%s stopping background scanning", hdev->name);
5371 } else {
5372 /* If there is at least one pending LE connection, we should
5373  * keep the background scan running.
5374  */
5376 /* If controller is connecting, we should not start scanning
5377 * since some controllers are not able to scan and connect at
5378  * the same time.
5379  */
5380 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5381 if (conn)
5382 return;
5384 /* If controller is currently scanning, we stop it to ensure we
5385  * don't miss any advertising (due to the duplicates filter).
5386  */
5387 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5388 hci_req_add_le_scan_disable(&req);
5390 hci_req_add_le_passive_scan(&req);
5392 BT_DBG("%s starting background scanning", hdev->name);
5395 err = hci_req_run(&req, update_background_scan_complete);
5396 if (err)
5397 BT_ERR("Failed to run HCI request: err %d", err);
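/* An illustrative call site (hypothetical, not code from this file)
 * honoring the locking requirement documented above:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 *
 * hci_dev_lock()/hci_dev_unlock() are the same helpers used around the
 * connection-handle lookups in the RX path earlier in this file.
 */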