Bluetooth: Add support for Unconfigured Index Added events
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "smp.h"
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
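/* Illustrative sketch (not part of this file): hci_index_ida is the IDA that
 * hands out the hciX index numbers. The registration and unregistration paths
 * later in hci_core.c pair an allocation with a release roughly like this;
 * the exact call sites are an assumption here and live outside this excerpt.
 *
 *	id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&hci_index_ida, hdev->id);
 */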
57 /* ---- HCI notifications ---- */
59 static void hci_notify(struct hci_dev *hdev, int event)
61 hci_sock_dev_event(hdev, event);
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
83 char buf[32];
84 size_t buf_size = min(count, (sizeof(buf)-1));
85 bool enable;
86 int err;
88 if (!test_bit(HCI_UP, &hdev->flags))
89 return -ENETDOWN;
91 if (copy_from_user(buf, user_buf, buf_size))
92 return -EFAULT;
94 buf[buf_size] = '\0';
95 if (strtobool(buf, &enable))
96 return -EINVAL;
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
99 return -EALREADY;
101 hci_req_lock(hdev);
102 if (enable)
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 HCI_CMD_TIMEOUT);
105 else
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 hci_req_unlock(hdev);
110 if (IS_ERR(skb))
111 return PTR_ERR(skb);
113 err = -bt_to_errno(skb->data[0]);
114 kfree_skb(skb);
116 if (err < 0)
117 return err;
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
121 return count;
124 static const struct file_operations dut_mode_fops = {
125 .open = simple_open,
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
134 u8 p;
136 hci_dev_lock(hdev);
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
154 return 0;
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
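/* Illustrative note on the pattern used by features_fops and the other
 * read-only debugfs entries below: single_open() records inode->i_private as
 * the seq_file's private pointer, so the hci_dev handed to
 * debugfs_create_file() comes back as f->private in the show callback. The
 * registration itself happens in __hci_init() further down, e.g.:
 *
 *	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
 *			    &features_fops);
 */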
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
174 hci_dev_lock(hdev);
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
179 return 0;
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
189 .read = seq_read,
190 .llseek = seq_lseek,
191 .release = single_release,
194 static int uuids_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
199 hci_dev_lock(hdev);
200 list_for_each_entry(uuid, &hdev->uuids, list) {
201 u8 i, val[16];
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
210 seq_printf(f, "%pUb\n", val);
212 hci_dev_unlock(hdev);
214 return 0;
217 static int uuids_open(struct inode *inode, struct file *file)
219 return single_open(file, uuids_show, inode->i_private);
222 static const struct file_operations uuids_fops = {
223 .open = uuids_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = single_release,
229 static int inquiry_cache_show(struct seq_file *f, void *p)
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
235 hci_dev_lock(hdev);
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 &data->bdaddr,
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
248 hci_dev_unlock(hdev);
250 return 0;
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
255 return single_open(file, inquiry_cache_show, inode->i_private);
258 static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
260 .read = seq_read,
261 .llseek = seq_lseek,
262 .release = single_release,
265 static int link_keys_show(struct seq_file *f, void *ptr)
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
270 hci_dev_lock(hdev);
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
276 hci_dev_unlock(hdev);
278 return 0;
281 static int link_keys_open(struct inode *inode, struct file *file)
283 return single_open(file, link_keys_show, inode->i_private);
286 static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
288 .read = seq_read,
289 .llseek = seq_lseek,
290 .release = single_release,
293 static int dev_class_show(struct seq_file *f, void *ptr)
295 struct hci_dev *hdev = f->private;
297 hci_dev_lock(hdev);
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
302 return 0;
305 static int dev_class_open(struct inode *inode, struct file *file)
307 return single_open(file, dev_class_show, inode->i_private);
310 static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
312 .read = seq_read,
313 .llseek = seq_lseek,
314 .release = single_release,
317 static int voice_setting_get(void *data, u64 *val)
319 struct hci_dev *hdev = data;
321 hci_dev_lock(hdev);
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
325 return 0;
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
331 static int auto_accept_delay_set(void *data, u64 val)
333 struct hci_dev *hdev = data;
335 hci_dev_lock(hdev);
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
339 return 0;
342 static int auto_accept_delay_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
346 hci_dev_lock(hdev);
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
350 return 0;
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
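/* Illustrative note: DEFINE_SIMPLE_ATTRIBUTE() builds auto_accept_delay_fops
 * from the get/set helpers above; reads format the u64 with "%llu\n" and
 * writes parse a u64 before calling auto_accept_delay_set(). The attribute is
 * exposed in __hci_init() with:
 *
 *	debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, hdev,
 *			    &auto_accept_delay_fops);
 */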
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
359 struct hci_dev *hdev = file->private_data;
360 char buf[3];
362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
363 buf[1] = '\n';
364 buf[2] = '\0';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
368 static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
372 struct hci_dev *hdev = file->private_data;
373 char buf[32];
374 size_t buf_size = min(count, (sizeof(buf)-1));
375 bool enable;
377 if (test_bit(HCI_UP, &hdev->flags))
378 return -EBUSY;
380 if (copy_from_user(buf, user_buf, buf_size))
381 return -EFAULT;
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
385 return -EINVAL;
387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
388 return -EALREADY;
390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
392 return count;
395 static const struct file_operations force_sc_support_fops = {
396 .open = simple_open,
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
405 struct hci_dev *hdev = file->private_data;
406 char buf[3];
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
409 buf[1] = '\n';
410 buf[2] = '\0';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414 static const struct file_operations sc_only_mode_fops = {
415 .open = simple_open,
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
420 static int idle_timeout_set(void *data, u64 val)
422 struct hci_dev *hdev = data;
424 if (val != 0 && (val < 500 || val > 3600000))
425 return -EINVAL;
427 hci_dev_lock(hdev);
428 hdev->idle_timeout = val;
429 hci_dev_unlock(hdev);
431 return 0;
434 static int idle_timeout_get(void *data, u64 *val)
436 struct hci_dev *hdev = data;
438 hci_dev_lock(hdev);
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
442 return 0;
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
448 static int rpa_timeout_set(void *data, u64 val)
450 struct hci_dev *hdev = data;
452 /* Require the RPA timeout to be at least 30 seconds and at most
453 * 24 hours.
455 if (val < 30 || val > (60 * 60 * 24))
456 return -EINVAL;
458 hci_dev_lock(hdev);
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
462 return 0;
465 static int rpa_timeout_get(void *data, u64 *val)
467 struct hci_dev *hdev = data;
469 hci_dev_lock(hdev);
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
473 return 0;
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
479 static int sniff_min_interval_set(void *data, u64 val)
481 struct hci_dev *hdev = data;
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
484 return -EINVAL;
486 hci_dev_lock(hdev);
487 hdev->sniff_min_interval = val;
488 hci_dev_unlock(hdev);
490 return 0;
493 static int sniff_min_interval_get(void *data, u64 *val)
495 struct hci_dev *hdev = data;
497 hci_dev_lock(hdev);
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
501 return 0;
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
507 static int sniff_max_interval_set(void *data, u64 val)
509 struct hci_dev *hdev = data;
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
512 return -EINVAL;
514 hci_dev_lock(hdev);
515 hdev->sniff_max_interval = val;
516 hci_dev_unlock(hdev);
518 return 0;
521 static int sniff_max_interval_get(void *data, u64 *val)
523 struct hci_dev *hdev = data;
525 hci_dev_lock(hdev);
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
529 return 0;
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
535 static int conn_info_min_age_set(void *data, u64 val)
537 struct hci_dev *hdev = data;
539 if (val == 0 || val > hdev->conn_info_max_age)
540 return -EINVAL;
542 hci_dev_lock(hdev);
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
546 return 0;
549 static int conn_info_min_age_get(void *data, u64 *val)
551 struct hci_dev *hdev = data;
553 hci_dev_lock(hdev);
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
557 return 0;
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
563 static int conn_info_max_age_set(void *data, u64 val)
565 struct hci_dev *hdev = data;
567 if (val == 0 || val < hdev->conn_info_min_age)
568 return -EINVAL;
570 hci_dev_lock(hdev);
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
574 return 0;
577 static int conn_info_max_age_get(void *data, u64 *val)
579 struct hci_dev *hdev = data;
581 hci_dev_lock(hdev);
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
585 return 0;
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
591 static int identity_show(struct seq_file *f, void *p)
593 struct hci_dev *hdev = f->private;
594 bdaddr_t addr;
595 u8 addr_type;
597 hci_dev_lock(hdev);
599 hci_copy_identity_address(hdev, &addr, &addr_type);
601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602 16, hdev->irk, &hdev->rpa);
604 hci_dev_unlock(hdev);
606 return 0;
609 static int identity_open(struct inode *inode, struct file *file)
611 return single_open(file, identity_show, inode->i_private);
614 static const struct file_operations identity_fops = {
615 .open = identity_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = single_release,
621 static int random_address_show(struct seq_file *f, void *p)
623 struct hci_dev *hdev = f->private;
625 hci_dev_lock(hdev);
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
629 return 0;
632 static int random_address_open(struct inode *inode, struct file *file)
634 return single_open(file, random_address_show, inode->i_private);
637 static const struct file_operations random_address_fops = {
638 .open = random_address_open,
639 .read = seq_read,
640 .llseek = seq_lseek,
641 .release = single_release,
644 static int static_address_show(struct seq_file *f, void *p)
646 struct hci_dev *hdev = f->private;
648 hci_dev_lock(hdev);
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
652 return 0;
655 static int static_address_open(struct inode *inode, struct file *file)
657 return single_open(file, static_address_show, inode->i_private);
660 static const struct file_operations static_address_fops = {
661 .open = static_address_open,
662 .read = seq_read,
663 .llseek = seq_lseek,
664 .release = single_release,
667 static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
671 struct hci_dev *hdev = file->private_data;
672 char buf[3];
674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
675 buf[1] = '\n';
676 buf[2] = '\0';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
680 static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
684 struct hci_dev *hdev = file->private_data;
685 char buf[32];
686 size_t buf_size = min(count, (sizeof(buf)-1));
687 bool enable;
689 if (test_bit(HCI_UP, &hdev->flags))
690 return -EBUSY;
692 if (copy_from_user(buf, user_buf, buf_size))
693 return -EFAULT;
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
697 return -EINVAL;
699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
700 return -EALREADY;
702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
704 return count;
707 static const struct file_operations force_static_address_fops = {
708 .open = simple_open,
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
714 static int white_list_show(struct seq_file *f, void *ptr)
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
719 hci_dev_lock(hdev);
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
724 return 0;
727 static int white_list_open(struct inode *inode, struct file *file)
729 return single_open(file, white_list_show, inode->i_private);
732 static const struct file_operations white_list_fops = {
733 .open = white_list_open,
734 .read = seq_read,
735 .llseek = seq_lseek,
736 .release = single_release,
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
744 hci_dev_lock(hdev);
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
751 hci_dev_unlock(hdev);
753 return 0;
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
758 return single_open(file, identity_resolving_keys_show,
759 inode->i_private);
762 static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
764 .read = seq_read,
765 .llseek = seq_lseek,
766 .release = single_release,
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
774 hci_dev_lock(hdev);
775 list_for_each_safe(p, n, &hdev->long_term_keys) {
776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780 __le64_to_cpu(ltk->rand), 16, ltk->val);
782 hci_dev_unlock(hdev);
784 return 0;
787 static int long_term_keys_open(struct inode *inode, struct file *file)
789 return single_open(file, long_term_keys_show, inode->i_private);
792 static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
794 .read = seq_read,
795 .llseek = seq_lseek,
796 .release = single_release,
799 static int conn_min_interval_set(void *data, u64 val)
801 struct hci_dev *hdev = data;
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
804 return -EINVAL;
806 hci_dev_lock(hdev);
807 hdev->le_conn_min_interval = val;
808 hci_dev_unlock(hdev);
810 return 0;
813 static int conn_min_interval_get(void *data, u64 *val)
815 struct hci_dev *hdev = data;
817 hci_dev_lock(hdev);
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
821 return 0;
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
827 static int conn_max_interval_set(void *data, u64 val)
829 struct hci_dev *hdev = data;
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
832 return -EINVAL;
834 hci_dev_lock(hdev);
835 hdev->le_conn_max_interval = val;
836 hci_dev_unlock(hdev);
838 return 0;
841 static int conn_max_interval_get(void *data, u64 *val)
843 struct hci_dev *hdev = data;
845 hci_dev_lock(hdev);
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
849 return 0;
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
855 static int conn_latency_set(void *data, u64 val)
857 struct hci_dev *hdev = data;
859 if (val > 0x01f3)
860 return -EINVAL;
862 hci_dev_lock(hdev);
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
866 return 0;
869 static int conn_latency_get(void *data, u64 *val)
871 struct hci_dev *hdev = data;
873 hci_dev_lock(hdev);
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
877 return 0;
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
883 static int supervision_timeout_set(void *data, u64 val)
885 struct hci_dev *hdev = data;
887 if (val < 0x000a || val > 0x0c80)
888 return -EINVAL;
890 hci_dev_lock(hdev);
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
894 return 0;
897 static int supervision_timeout_get(void *data, u64 *val)
899 struct hci_dev *hdev = data;
901 hci_dev_lock(hdev);
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
905 return 0;
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
911 static int adv_channel_map_set(void *data, u64 val)
913 struct hci_dev *hdev = data;
915 if (val < 0x01 || val > 0x07)
916 return -EINVAL;
918 hci_dev_lock(hdev);
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
922 return 0;
925 static int adv_channel_map_get(void *data, u64 *val)
927 struct hci_dev *hdev = data;
929 hci_dev_lock(hdev);
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
933 return 0;
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
939 static int device_list_show(struct seq_file *f, void *ptr)
941 struct hci_dev *hdev = f->private;
942 struct hci_conn_params *p;
944 hci_dev_lock(hdev);
945 list_for_each_entry(p, &hdev->le_conn_params, list) {
946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
947 p->auto_connect);
949 hci_dev_unlock(hdev);
951 return 0;
954 static int device_list_open(struct inode *inode, struct file *file)
956 return single_open(file, device_list_show, inode->i_private);
959 static const struct file_operations device_list_fops = {
960 .open = device_list_open,
961 .read = seq_read,
962 .llseek = seq_lseek,
963 .release = single_release,
966 /* ---- HCI requests ---- */
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
991 u8 event)
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
995 struct sk_buff *skb;
997 hci_dev_lock(hdev);
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1002 hci_dev_unlock(hdev);
1004 if (!skb)
1005 return ERR_PTR(-ENODATA);
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1009 goto failed;
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1015 if (event) {
1016 if (hdr->evt != event)
1017 goto failed;
1018 return skb;
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1023 goto failed;
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1028 goto failed;
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1034 if (opcode == __le16_to_cpu(ev->opcode))
1035 return skb;
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1040 failed:
1041 kfree_skb(skb);
1042 return ERR_PTR(-ENODATA);
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046 const void *param, u8 event, u32 timeout)
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1050 int err = 0;
1052 BT_DBG("%s", hdev->name);
1054 hci_req_init(&req, hdev);
1056 hci_req_add_ev(&req, opcode, plen, param, event);
1058 hdev->req_status = HCI_REQ_PEND;
1060 err = hci_req_run(&req, hci_req_sync_complete);
1061 if (err < 0)
1062 return ERR_PTR(err);
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1067 schedule_timeout(timeout);
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1074 switch (hdev->req_status) {
1075 case HCI_REQ_DONE:
1076 err = -bt_to_errno(hdev->req_result);
1077 break;
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1081 break;
1083 default:
1084 err = -ETIMEDOUT;
1085 break;
1088 hdev->req_status = hdev->req_result = 0;
1090 BT_DBG("%s end: err %d", hdev->name, err);
1092 if (err < 0)
1093 return ERR_PTR(err);
1095 return hci_get_cmd_complete(hdev, opcode, event);
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100 const void *param, u32 timeout)
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1104 EXPORT_SYMBOL(__hci_cmd_sync);
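/* Usage sketch (illustrative only; example_hci_reset_sync is a hypothetical
 * helper, not part of this file). It mirrors dut_mode_write() above: take the
 * request lock, send one command synchronously, then map the status byte of
 * the Command Complete parameters to an errno.
 */
static int example_hci_reset_sync(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First parameter byte of Command Complete is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	return err;
}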
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108 void (*func)(struct hci_request *req,
1109 unsigned long opt),
1110 unsigned long opt, __u32 timeout)
1112 struct hci_request req;
1113 DECLARE_WAITQUEUE(wait, current);
1114 int err = 0;
1116 BT_DBG("%s start", hdev->name);
1118 hci_req_init(&req, hdev);
1120 hdev->req_status = HCI_REQ_PEND;
1122 func(&req, opt);
1124 err = hci_req_run(&req, hci_req_sync_complete);
1125 if (err < 0) {
1126 hdev->req_status = 0;
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
1133 if (err == -ENODATA)
1134 return 0;
1136 return err;
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1142 schedule_timeout(timeout);
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1146 if (signal_pending(current))
1147 return -EINTR;
1149 switch (hdev->req_status) {
1150 case HCI_REQ_DONE:
1151 err = -bt_to_errno(hdev->req_result);
1152 break;
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1156 break;
1158 default:
1159 err = -ETIMEDOUT;
1160 break;
1163 hdev->req_status = hdev->req_result = 0;
1165 BT_DBG("%s end: err %d", hdev->name, err);
1167 return err;
1170 static int hci_req_sync(struct hci_dev *hdev,
1171 void (*req)(struct hci_request *req,
1172 unsigned long opt),
1173 unsigned long opt, __u32 timeout)
1175 int ret;
1177 if (!test_bit(HCI_UP, &hdev->flags))
1178 return -ENETDOWN;
1180 /* Serialize all requests */
1181 hci_req_lock(hdev);
1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1183 hci_req_unlock(hdev);
1185 return ret;
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1190 BT_DBG("%s %ld", req->hdev->name, opt);
1192 /* Reset device */
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1197 static void bredr_init(struct hci_request *req)
1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1201 /* Read Local Supported Features */
1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1204 /* Read Local Version */
1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1207 /* Read BD Address */
1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1211 static void amp_init(struct hci_request *req)
1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1215 /* Read Local Version */
1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1224 /* Read Local AMP Info */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1227 /* Read Data Blk size */
1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1239 struct hci_dev *hdev = req->hdev;
1241 BT_DBG("%s %ld", hdev->name, opt);
1243 /* Reset */
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245 hci_reset_req(req, 0);
1247 switch (hdev->dev_type) {
1248 case HCI_BREDR:
1249 bredr_init(req);
1250 break;
1252 case HCI_AMP:
1253 amp_init(req);
1254 break;
1256 default:
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1258 break;
1262 static void bredr_setup(struct hci_request *req)
1264 struct hci_dev *hdev = req->hdev;
1266 __le16 param;
1267 __u8 flt_type;
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1272 /* Read Class of Device */
1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1275 /* Read Local Name */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1278 /* Read Voice Setting */
1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1291 /* Connection accept timeout ~20 secs */
1292 param = cpu_to_le16(0x7d00);
1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1304 static void le_setup(struct hci_request *req)
1306 struct hci_dev *hdev = req->hdev;
1308 /* Read LE Buffer Size */
1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1311 /* Read LE Local Supported Features */
1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1317 /* Read LE Advertising Channel TX Power */
1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1320 /* Read LE White List Size */
1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1333 if (lmp_ext_inq_capable(hdev))
1334 return 0x02;
1336 if (lmp_inq_rssi_capable(hdev))
1337 return 0x01;
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1341 return 0x01;
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345 return 0x01;
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347 return 0x01;
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349 return 0x01;
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1354 return 0x01;
1356 return 0x00;
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1361 u8 mode;
1363 mode = hci_get_inquiry_mode(req->hdev);
1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1368 static void hci_setup_event_mask(struct hci_request *req)
1370 struct hci_dev *hdev = req->hdev;
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1378 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382 return;
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
1390 } else {
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1444 struct hci_dev *hdev = req->hdev;
1446 if (lmp_bredr_capable(hdev))
1447 bredr_setup(req);
1448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1451 if (lmp_le_capable(hdev))
1452 le_setup(req);
1454 hci_setup_event_mask(req);
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1462 if (lmp_ssp_capable(hdev)) {
1463 /* When SSP is available, the host features page
1464 * should also be available. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1469 hdev->max_page = 0x01;
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
1475 } else {
1476 struct hci_cp_write_eir cp;
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1485 if (lmp_inq_rssi_capable(hdev))
1486 hci_setup_inquiry_mode(req);
1488 if (lmp_inq_tx_pwr_capable(hdev))
1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1494 cp.page = 0x01;
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
1506 static void hci_setup_link_policy(struct hci_request *req)
1508 struct hci_dev *hdev = req->hdev;
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1521 cp.policy = cpu_to_le16(link_policy);
1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1525 static void hci_set_le_support(struct hci_request *req)
1527 struct hci_dev *hdev = req->hdev;
1528 struct hci_cp_write_le_host_supported cp;
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1534 memset(&cp, 0, sizeof(cp));
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1541 if (cp.le != lmp_host_le_capable(hdev))
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1554 if (lmp_csb_master_capable(hdev)) {
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1564 if (lmp_csb_slave_capable(hdev)) {
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1580 struct hci_dev *hdev = req->hdev;
1581 u8 p;
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1587 * Check the supported commands and only if the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
1592 * Some controllers indicate that they support deleting
1593 * stored link keys, but they don't. The quirk lets a driver
1594 * just disable this command.
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598 struct hci_cp_delete_stored_link_key cp;
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603 sizeof(cp), &cp);
1606 if (hdev->commands[5] & 0x10)
1607 hci_setup_link_policy(req);
1609 if (lmp_le_capable(hdev)) {
1610 u8 events[8];
1612 memset(events, 0, sizeof(events));
1613 events[0] = 0x1f;
1615 /* If the controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1620 * Parameter Request
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624 events);
1626 hci_set_le_support(req);
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1633 cp.page = p;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635 sizeof(cp), &cp);
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1641 struct hci_dev *hdev = req->hdev;
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1647 /* Check for Synchronization Train support */
1648 if (lmp_sync_train_capable(hdev))
1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1651 /* Enable Secure Connections if supported and configured */
1652 if ((lmp_sc_capable(hdev) ||
1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
1661 static int __hci_init(struct hci_dev *hdev)
1663 int err;
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1677 /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1679 * first stage init.
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
1728 if (lmp_ssp_capable(hdev)) {
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1746 if (lmp_le_capable(hdev)) {
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
1791 return 0;
1794 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1796 __u8 scan = opt;
1798 BT_DBG("%s %x", req->hdev->name, scan);
1800 /* Inquiry and Page scans */
1801 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
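/* Usage sketch (illustrative; example_set_scan is a hypothetical helper):
 * request builders such as hci_scan_req() are run synchronously through
 * hci_req_sync(), which serializes against other requests and waits for
 * completion or timeout. The HCIDEVSETSCAN ioctl path does essentially this.
 */
static int example_set_scan(struct hci_dev *hdev, __u8 scan)
{
	return hci_req_sync(hdev, hci_scan_req, scan, HCI_INIT_TIMEOUT);
}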
1804 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1806 __u8 auth = opt;
1808 BT_DBG("%s %x", req->hdev->name, auth);
1810 /* Authentication */
1811 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1814 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1816 __u8 encrypt = opt;
1818 BT_DBG("%s %x", req->hdev->name, encrypt);
1820 /* Encryption */
1821 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1824 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1826 __le16 policy = cpu_to_le16(opt);
1828 BT_DBG("%s %x", req->hdev->name, policy);
1830 /* Default link policy */
1831 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1834 /* Get HCI device by index.
1835 * Device is held on return. */
1836 struct hci_dev *hci_dev_get(int index)
1838 struct hci_dev *hdev = NULL, *d;
1840 BT_DBG("%d", index);
1842 if (index < 0)
1843 return NULL;
1845 read_lock(&hci_dev_list_lock);
1846 list_for_each_entry(d, &hci_dev_list, list) {
1847 if (d->id == index) {
1848 hdev = hci_dev_hold(d);
1849 break;
1852 read_unlock(&hci_dev_list_lock);
1853 return hdev;
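/* Usage sketch (illustrative; example_with_hdev is a hypothetical helper):
 * hci_dev_get() returns the device with a reference held, so every successful
 * lookup must be balanced by hci_dev_put(), as hci_inquiry() below does on
 * its done label.
 */
static int example_with_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	/* ... use hdev under the appropriate locks ... */

	hci_dev_put(hdev);
	return 0;
}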
1856 /* ---- Inquiry support ---- */
1858 bool hci_discovery_active(struct hci_dev *hdev)
1860 struct discovery_state *discov = &hdev->discovery;
1862 switch (discov->state) {
1863 case DISCOVERY_FINDING:
1864 case DISCOVERY_RESOLVING:
1865 return true;
1867 default:
1868 return false;
1872 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1874 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1876 if (hdev->discovery.state == state)
1877 return;
1879 switch (state) {
1880 case DISCOVERY_STOPPED:
1881 hci_update_background_scan(hdev);
1883 if (hdev->discovery.state != DISCOVERY_STARTING)
1884 mgmt_discovering(hdev, 0);
1885 break;
1886 case DISCOVERY_STARTING:
1887 break;
1888 case DISCOVERY_FINDING:
1889 mgmt_discovering(hdev, 1);
1890 break;
1891 case DISCOVERY_RESOLVING:
1892 break;
1893 case DISCOVERY_STOPPING:
1894 break;
1897 hdev->discovery.state = state;
1900 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1902 struct discovery_state *cache = &hdev->discovery;
1903 struct inquiry_entry *p, *n;
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
1907 kfree(p);
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1914 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
1917 struct discovery_state *cache = &hdev->discovery;
1918 struct inquiry_entry *e;
1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1924 return e;
1927 return NULL;
1930 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1931 bdaddr_t *bdaddr)
1933 struct discovery_state *cache = &hdev->discovery;
1934 struct inquiry_entry *e;
1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
1938 list_for_each_entry(e, &cache->unknown, list) {
1939 if (!bacmp(&e->data.bdaddr, bdaddr))
1940 return e;
1943 return NULL;
1946 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1947 bdaddr_t *bdaddr,
1948 int state)
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1962 return NULL;
1965 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1966 struct inquiry_entry *ie)
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1972 list_del(&ie->list);
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
1976 abs(p->data.rssi) >= abs(ie->data.rssi))
1977 break;
1978 pos = &p->list;
1981 list_add(&ie->list, pos);
1984 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct inquiry_entry *ie;
1989 u32 flags = 0;
1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1999 if (ie) {
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2003 if (ie->name_state == NAME_NEEDED &&
2004 data->rssi != ie->data.rssi) {
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2009 goto update;
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2019 list_add(&ie->all, &cache->all);
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2028 update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
2030 ie->name_state != NAME_PENDING) {
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
2037 cache->timestamp = jiffies;
2039 if (ie->name_state == NAME_NOT_KNOWN)
2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2042 done:
2043 return flags;
2046 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2053 list_for_each_entry(e, &cache->all, all) {
2054 struct inquiry_data *data = &e->data;
2056 if (copied >= num)
2057 break;
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
2066 info++;
2067 copied++;
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2074 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2077 struct hci_dev *hdev = req->hdev;
2078 struct hci_cp_inquiry cp;
2080 BT_DBG("%s", hdev->name);
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2092 static int wait_inquiry(void *word)
2094 schedule();
2095 return signal_pending(current);
2098 int hci_inquiry(void __user *arg)
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
2112 return -ENODEV;
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2120 err = -EOPNOTSUPP;
2121 goto done;
2124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2134 hci_dev_lock(hdev);
2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2137 hci_inquiry_cache_flush(hdev);
2138 do_inquiry = 1;
2140 hci_dev_unlock(hdev);
2142 timeo = ir.length * msecs_to_jiffies(2000);
2144 if (do_inquiry) {
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
2147 if (err < 0)
2148 goto done;
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
2158 /* For an unlimited number of responses we will use a buffer with
2159 * 255 entries
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2163 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
2164 * then copy it to user space.
2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2167 if (!buf) {
2168 err = -ENOMEM;
2169 goto done;
2172 hci_dev_lock(hdev);
2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2174 hci_dev_unlock(hdev);
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2181 ir.num_rsp))
2182 err = -EFAULT;
2183 } else
2184 err = -EFAULT;
2186 kfree(buf);
2188 done:
2189 hci_dev_put(hdev);
2190 return err;
2193 static int hci_dev_do_open(struct hci_dev *hdev)
2195 int ret = 0;
2197 BT_DBG("%s %p", hdev->name, hdev);
2199 hci_req_lock(hdev);
2201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202 ret = -ENODEV;
2203 goto done;
2206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211 ret = -ERFKILL;
2212 goto done;
2215 /* Check for valid public address or a configured static
2216 * random address, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2218 * or not.
2220 * In case of user channel usage, it is not important
2221 * if a public address or static random address is
2222 * available.
2224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
2229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
2232 goto done;
2236 if (test_bit(HCI_UP, &hdev->flags)) {
2237 ret = -EALREADY;
2238 goto done;
2241 if (hdev->open(hdev)) {
2242 ret = -EIO;
2243 goto done;
2246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
2249 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250 ret = hdev->setup(hdev);
2252 /* If public address change is configured, ensure that the
2253 * address gets programmed. If the driver does not support
2254 * changing the public address, fail the power on procedure.
2256 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257 if (hdev->set_bdaddr)
2258 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2259 else
2260 ret = -EADDRNOTAVAIL;
2263 if (!ret) {
2264 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2266 ret = __hci_init(hdev);
2269 clear_bit(HCI_INIT, &hdev->flags);
2271 if (!ret) {
2272 hci_dev_hold(hdev);
2273 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2274 set_bit(HCI_UP, &hdev->flags);
2275 hci_notify(hdev, HCI_DEV_UP);
2276 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2277 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2278 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2279 hdev->dev_type == HCI_BREDR) {
2280 hci_dev_lock(hdev);
2281 mgmt_powered(hdev, 1);
2282 hci_dev_unlock(hdev);
2284 } else {
2285 /* Init failed, cleanup */
2286 flush_work(&hdev->tx_work);
2287 flush_work(&hdev->cmd_work);
2288 flush_work(&hdev->rx_work);
2290 skb_queue_purge(&hdev->cmd_q);
2291 skb_queue_purge(&hdev->rx_q);
2293 if (hdev->flush)
2294 hdev->flush(hdev);
2296 if (hdev->sent_cmd) {
2297 kfree_skb(hdev->sent_cmd);
2298 hdev->sent_cmd = NULL;
2301 hdev->close(hdev);
2302 hdev->flags &= BIT(HCI_RAW);
2305 done:
2306 hci_req_unlock(hdev);
2307 return ret;
2310 /* ---- HCI ioctl helpers ---- */
2312 int hci_dev_open(__u16 dev)
2314 struct hci_dev *hdev;
2315 int err;
2317 hdev = hci_dev_get(dev);
2318 if (!hdev)
2319 return -ENODEV;
2321 /* Devices that are marked as unconfigured can only be powered
2322 * up as a user channel. Trying to bring them up as normal devices
2323 * will result in a failure. Only user channel operation is
2324 * possible.
2326 * When this function is called for a user channel, the flag
2327 * HCI_USER_CHANNEL will be set first before attempting to
2328 * open the device.
2330 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2331 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2332 err = -EOPNOTSUPP;
2333 goto done;
2336 /* We need to ensure that no other power on/off work is pending
2337 * before proceeding to call hci_dev_do_open. This is
2338 * particularly important if the setup procedure has not yet
2339 * completed.
2341 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2342 cancel_delayed_work(&hdev->power_off);
2344 /* After this call it is guaranteed that the setup procedure
2345 * has finished. This means that error conditions like RFKILL
2346 * or no valid public or static random address apply.
2348 flush_workqueue(hdev->req_workqueue);
2350 err = hci_dev_do_open(hdev);
2352 done:
2353 hci_dev_put(hdev);
2354 return err;
2357 static int hci_dev_do_close(struct hci_dev *hdev)
2359 BT_DBG("%s %p", hdev->name, hdev);
2361 cancel_delayed_work(&hdev->power_off);
2363 hci_req_cancel(hdev, ENODEV);
2364 hci_req_lock(hdev);
2366 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2367 cancel_delayed_work_sync(&hdev->cmd_timer);
2368 hci_req_unlock(hdev);
2369 return 0;
2372 /* Flush RX and TX works */
2373 flush_work(&hdev->tx_work);
2374 flush_work(&hdev->rx_work);
2376 if (hdev->discov_timeout > 0) {
2377 cancel_delayed_work(&hdev->discov_off);
2378 hdev->discov_timeout = 0;
2379 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2380 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2383 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2384 cancel_delayed_work(&hdev->service_cache);
2386 cancel_delayed_work_sync(&hdev->le_scan_disable);
2388 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2389 cancel_delayed_work_sync(&hdev->rpa_expired);
2391 hci_dev_lock(hdev);
2392 hci_inquiry_cache_flush(hdev);
2393 hci_conn_hash_flush(hdev);
2394 hci_pend_le_conns_clear(hdev);
2395 hci_dev_unlock(hdev);
2397 hci_notify(hdev, HCI_DEV_DOWN);
2399 if (hdev->flush)
2400 hdev->flush(hdev);
2402 /* Reset device */
2403 skb_queue_purge(&hdev->cmd_q);
2404 atomic_set(&hdev->cmd_cnt, 1);
2405 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2406 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2407 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2408 set_bit(HCI_INIT, &hdev->flags);
2409 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2410 clear_bit(HCI_INIT, &hdev->flags);
2413 /* flush cmd work */
2414 flush_work(&hdev->cmd_work);
2416 /* Drop queues */
2417 skb_queue_purge(&hdev->rx_q);
2418 skb_queue_purge(&hdev->cmd_q);
2419 skb_queue_purge(&hdev->raw_q);
2421 /* Drop last sent command */
2422 if (hdev->sent_cmd) {
2423 cancel_delayed_work_sync(&hdev->cmd_timer);
2424 kfree_skb(hdev->sent_cmd);
2425 hdev->sent_cmd = NULL;
2428 kfree_skb(hdev->recv_evt);
2429 hdev->recv_evt = NULL;
2431 /* After this point our queues are empty
2432 * and no tasks are scheduled. */
2433 hdev->close(hdev);
2435 /* Clear flags */
2436 hdev->flags &= BIT(HCI_RAW);
2437 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2439 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2440 if (hdev->dev_type == HCI_BREDR) {
2441 hci_dev_lock(hdev);
2442 mgmt_powered(hdev, 0);
2443 hci_dev_unlock(hdev);
2447 /* Controller radio is available but is currently powered down */
2448 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2450 memset(hdev->eir, 0, sizeof(hdev->eir));
2451 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2452 bacpy(&hdev->random_addr, BDADDR_ANY);
2454 hci_req_unlock(hdev);
2456 hci_dev_put(hdev);
2457 return 0;
2460 int hci_dev_close(__u16 dev)
2462 struct hci_dev *hdev;
2463 int err;
2465 hdev = hci_dev_get(dev);
2466 if (!hdev)
2467 return -ENODEV;
2469 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2470 err = -EBUSY;
2471 goto done;
2474 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2475 cancel_delayed_work(&hdev->power_off);
2477 err = hci_dev_do_close(hdev);
2479 done:
2480 hci_dev_put(hdev);
2481 return err;
2484 int hci_dev_reset(__u16 dev)
2486 struct hci_dev *hdev;
2487 int ret = 0;
2489 hdev = hci_dev_get(dev);
2490 if (!hdev)
2491 return -ENODEV;
2493 hci_req_lock(hdev);
2495 if (!test_bit(HCI_UP, &hdev->flags)) {
2496 ret = -ENETDOWN;
2497 goto done;
2500 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501 ret = -EBUSY;
2502 goto done;
2505 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2506 ret = -EOPNOTSUPP;
2507 goto done;
2510 /* Drop queues */
2511 skb_queue_purge(&hdev->rx_q);
2512 skb_queue_purge(&hdev->cmd_q);
2514 hci_dev_lock(hdev);
2515 hci_inquiry_cache_flush(hdev);
2516 hci_conn_hash_flush(hdev);
2517 hci_dev_unlock(hdev);
2519 if (hdev->flush)
2520 hdev->flush(hdev);
2522 atomic_set(&hdev->cmd_cnt, 1);
2523 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2525 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2527 done:
2528 hci_req_unlock(hdev);
2529 hci_dev_put(hdev);
2530 return ret;
2533 int hci_dev_reset_stat(__u16 dev)
2535 struct hci_dev *hdev;
2536 int ret = 0;
2538 hdev = hci_dev_get(dev);
2539 if (!hdev)
2540 return -ENODEV;
2542 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2543 ret = -EBUSY;
2544 goto done;
2547 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2548 ret = -EOPNOTSUPP;
2549 goto done;
2552 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2554 done:
2555 hci_dev_put(hdev);
2556 return ret;
2559 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2561 struct hci_dev *hdev;
2562 struct hci_dev_req dr;
2563 int err = 0;
2565 if (copy_from_user(&dr, arg, sizeof(dr)))
2566 return -EFAULT;
2568 hdev = hci_dev_get(dr.dev_id);
2569 if (!hdev)
2570 return -ENODEV;
2572 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 err = -EBUSY;
2574 goto done;
2577 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2578 err = -EOPNOTSUPP;
2579 goto done;
2582 if (hdev->dev_type != HCI_BREDR) {
2583 err = -EOPNOTSUPP;
2584 goto done;
2587 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2588 err = -EOPNOTSUPP;
2589 goto done;
2592 switch (cmd) {
2593 case HCISETAUTH:
2594 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
2596 break;
2598 case HCISETENCRYPT:
2599 if (!lmp_encrypt_capable(hdev)) {
2600 err = -EOPNOTSUPP;
2601 break;
2604 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2605 /* Auth must be enabled first */
2606 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2607 HCI_INIT_TIMEOUT);
2608 if (err)
2609 break;
2612 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2613 HCI_INIT_TIMEOUT);
2614 break;
2616 case HCISETSCAN:
2617 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2618 HCI_INIT_TIMEOUT);
2619 break;
2621 case HCISETLINKPOL:
2622 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2623 HCI_INIT_TIMEOUT);
2624 break;
2626 case HCISETLINKMODE:
2627 hdev->link_mode = ((__u16) dr.dev_opt) &
2628 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2629 break;
2631 case HCISETPTYPE:
2632 hdev->pkt_type = (__u16) dr.dev_opt;
2633 break;
2635 case HCISETACLMTU:
2636 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2637 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2638 break;
2640 case HCISETSCOMTU:
2641 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2642 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2643 break;
2645 default:
2646 err = -EINVAL;
2647 break;
2650 done:
2651 hci_dev_put(hdev);
2652 return err;
2655 int hci_get_dev_list(void __user *arg)
2657 struct hci_dev *hdev;
2658 struct hci_dev_list_req *dl;
2659 struct hci_dev_req *dr;
2660 int n = 0, size, err;
2661 __u16 dev_num;
2663 if (get_user(dev_num, (__u16 __user *) arg))
2664 return -EFAULT;
2666 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2667 return -EINVAL;
2669 size = sizeof(*dl) + dev_num * sizeof(*dr);
2671 dl = kzalloc(size, GFP_KERNEL);
2672 if (!dl)
2673 return -ENOMEM;
2675 dr = dl->dev_req;
2677 read_lock(&hci_dev_list_lock);
2678 list_for_each_entry(hdev, &hci_dev_list, list) {
2679 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2680 cancel_delayed_work(&hdev->power_off);
2682 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2683 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2685 (dr + n)->dev_id = hdev->id;
2686 (dr + n)->dev_opt = hdev->flags;
2688 if (++n >= dev_num)
2689 break;
2691 read_unlock(&hci_dev_list_lock);
2693 dl->dev_num = n;
2694 size = sizeof(*dl) + n * sizeof(*dr);
2696 err = copy_to_user(arg, dl, size);
2697 kfree(dl);
2699 return err ? -EFAULT : 0;
2702 int hci_get_dev_info(void __user *arg)
2704 struct hci_dev *hdev;
2705 struct hci_dev_info di;
2706 int err = 0;
2708 if (copy_from_user(&di, arg, sizeof(di)))
2709 return -EFAULT;
2711 hdev = hci_dev_get(di.dev_id);
2712 if (!hdev)
2713 return -ENODEV;
2715 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2716 cancel_delayed_work_sync(&hdev->power_off);
2718 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2719 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2721 strcpy(di.name, hdev->name);
2722 di.bdaddr = hdev->bdaddr;
2723 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2724 di.flags = hdev->flags;
2725 di.pkt_type = hdev->pkt_type;
2726 if (lmp_bredr_capable(hdev)) {
2727 di.acl_mtu = hdev->acl_mtu;
2728 di.acl_pkts = hdev->acl_pkts;
2729 di.sco_mtu = hdev->sco_mtu;
2730 di.sco_pkts = hdev->sco_pkts;
2731 } else {
2732 di.acl_mtu = hdev->le_mtu;
2733 di.acl_pkts = hdev->le_pkts;
2734 di.sco_mtu = 0;
2735 di.sco_pkts = 0;
2737 di.link_policy = hdev->link_policy;
2738 di.link_mode = hdev->link_mode;
2740 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2741 memcpy(&di.features, &hdev->features, sizeof(di.features));
2743 if (copy_to_user(arg, &di, sizeof(di)))
2744 err = -EFAULT;
2746 hci_dev_put(hdev);
2748 return err;
2751 /* ---- Interface to HCI drivers ---- */
2753 static int hci_rfkill_set_block(void *data, bool blocked)
2755 struct hci_dev *hdev = data;
2757 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2759 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2760 return -EBUSY;
2762 if (blocked) {
2763 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2764 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2765 hci_dev_do_close(hdev);
2766 } else {
2767 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2770 return 0;
2773 static const struct rfkill_ops hci_rfkill_ops = {
2774 .set_block = hci_rfkill_set_block,
2777 static void hci_power_on(struct work_struct *work)
2779 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2780 int err;
2782 BT_DBG("%s", hdev->name);
2784 err = hci_dev_do_open(hdev);
2785 if (err < 0) {
2786 mgmt_set_powered_failed(hdev, err);
2787 return;
2790 /* During the HCI setup phase, a few error conditions are
2791 * ignored and they need to be checked now. If they are still
2792 * valid, it is important to turn the device back off.
2794 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2795 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2796 (hdev->dev_type == HCI_BREDR &&
2797 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2798 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2799 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2800 hci_dev_do_close(hdev);
2801 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2802 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2803 HCI_AUTO_OFF_TIMEOUT);
2806 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2807 /* For unconfigured devices, set the HCI_RAW flag
2808 * so that userspace can easily identify them.
2810 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2811 set_bit(HCI_RAW, &hdev->flags);
2813 /* For fully configured devices, this will send
2814 * the Index Added event. For unconfigured devices,
2815 * it will send the Unconfigured Index Added event.
2817 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2818 * and no event will be sent.
2820 mgmt_index_added(hdev);
2824 static void hci_power_off(struct work_struct *work)
2826 struct hci_dev *hdev = container_of(work, struct hci_dev,
2827 power_off.work);
2829 BT_DBG("%s", hdev->name);
2831 hci_dev_do_close(hdev);
2834 static void hci_discov_off(struct work_struct *work)
2836 struct hci_dev *hdev;
2838 hdev = container_of(work, struct hci_dev, discov_off.work);
2840 BT_DBG("%s", hdev->name);
2842 mgmt_discoverable_timeout(hdev);
2845 void hci_uuids_clear(struct hci_dev *hdev)
2847 struct bt_uuid *uuid, *tmp;
2849 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2850 list_del(&uuid->list);
2851 kfree(uuid);
2855 void hci_link_keys_clear(struct hci_dev *hdev)
2857 struct list_head *p, *n;
2859 list_for_each_safe(p, n, &hdev->link_keys) {
2860 struct link_key *key;
2862 key = list_entry(p, struct link_key, list);
2864 list_del(p);
2865 kfree(key);
2869 void hci_smp_ltks_clear(struct hci_dev *hdev)
2871 struct smp_ltk *k, *tmp;
2873 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2874 list_del(&k->list);
2875 kfree(k);
2879 void hci_smp_irks_clear(struct hci_dev *hdev)
2881 struct smp_irk *k, *tmp;
2883 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2884 list_del(&k->list);
2885 kfree(k);
2889 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2891 struct link_key *k;
2893 list_for_each_entry(k, &hdev->link_keys, list)
2894 if (bacmp(bdaddr, &k->bdaddr) == 0)
2895 return k;
2897 return NULL;
2900 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2901 u8 key_type, u8 old_key_type)
2903 /* Legacy key */
2904 if (key_type < 0x03)
2905 return true;
2907 /* Debug keys are insecure so don't store them persistently */
2908 if (key_type == HCI_LK_DEBUG_COMBINATION)
2909 return false;
2911 /* Changed combination key and there's no previous one */
2912 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2913 return false;
2915 /* Security mode 3 case */
2916 if (!conn)
2917 return true;
2919 /* Neither the local nor the remote side had no-bonding as a requirement */
2920 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2921 return true;
2923 /* Local side had dedicated bonding as requirement */
2924 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2925 return true;
2927 /* Remote side had dedicated bonding as requirement */
2928 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2929 return true;
2931 /* If none of the above criteria match, then don't store the key
2932 * persistently */
2933 return false;
2936 static bool ltk_type_master(u8 type)
2938 return (type == SMP_LTK);
2941 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2942 bool master)
2944 struct smp_ltk *k;
2946 list_for_each_entry(k, &hdev->long_term_keys, list) {
2947 if (k->ediv != ediv || k->rand != rand)
2948 continue;
2950 if (ltk_type_master(k->type) != master)
2951 continue;
2953 return k;
2956 return NULL;
2959 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2960 u8 addr_type, bool master)
2962 struct smp_ltk *k;
2964 list_for_each_entry(k, &hdev->long_term_keys, list)
2965 if (addr_type == k->bdaddr_type &&
2966 bacmp(bdaddr, &k->bdaddr) == 0 &&
2967 ltk_type_master(k->type) == master)
2968 return k;
2970 return NULL;
2973 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2975 struct smp_irk *irk;
2977 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978 if (!bacmp(&irk->rpa, rpa))
2979 return irk;
2982 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2983 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2984 bacpy(&irk->rpa, rpa);
2985 return irk;
2989 return NULL;
2992 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2993 u8 addr_type)
2995 struct smp_irk *irk;
2997 /* Identity Address must be public or static random */
2998 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2999 return NULL;
3001 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3002 if (addr_type == irk->addr_type &&
3003 bacmp(bdaddr, &irk->bdaddr) == 0)
3004 return irk;
3007 return NULL;
3010 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3011 bdaddr_t *bdaddr, u8 *val, u8 type,
3012 u8 pin_len, bool *persistent)
3014 struct link_key *key, *old_key;
3015 u8 old_key_type;
3017 old_key = hci_find_link_key(hdev, bdaddr);
3018 if (old_key) {
3019 old_key_type = old_key->type;
3020 key = old_key;
3021 } else {
3022 old_key_type = conn ? conn->key_type : 0xff;
3023 key = kzalloc(sizeof(*key), GFP_KERNEL);
3024 if (!key)
3025 return NULL;
3026 list_add(&key->list, &hdev->link_keys);
3029 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3031 /* Some buggy controller combinations generate a changed
3032 * combination key for legacy pairing even when there's no
3033 * previous key */
3034 if (type == HCI_LK_CHANGED_COMBINATION &&
3035 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3036 type = HCI_LK_COMBINATION;
3037 if (conn)
3038 conn->key_type = type;
3041 bacpy(&key->bdaddr, bdaddr);
3042 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3043 key->pin_len = pin_len;
3045 if (type == HCI_LK_CHANGED_COMBINATION)
3046 key->type = old_key_type;
3047 else
3048 key->type = type;
3050 if (persistent)
3051 *persistent = hci_persistent_key(hdev, conn, type,
3052 old_key_type);
3054 return key;
3057 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3058 u8 addr_type, u8 type, u8 authenticated,
3059 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3061 struct smp_ltk *key, *old_key;
3062 bool master = ltk_type_master(type);
3064 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3065 if (old_key)
3066 key = old_key;
3067 else {
3068 key = kzalloc(sizeof(*key), GFP_KERNEL);
3069 if (!key)
3070 return NULL;
3071 list_add(&key->list, &hdev->long_term_keys);
3074 bacpy(&key->bdaddr, bdaddr);
3075 key->bdaddr_type = addr_type;
3076 memcpy(key->val, tk, sizeof(key->val));
3077 key->authenticated = authenticated;
3078 key->ediv = ediv;
3079 key->rand = rand;
3080 key->enc_size = enc_size;
3081 key->type = type;
3083 return key;
3086 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3087 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3089 struct smp_irk *irk;
3091 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3092 if (!irk) {
3093 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3094 if (!irk)
3095 return NULL;
3097 bacpy(&irk->bdaddr, bdaddr);
3098 irk->addr_type = addr_type;
3100 list_add(&irk->list, &hdev->identity_resolving_keys);
3103 memcpy(irk->val, val, 16);
3104 bacpy(&irk->rpa, rpa);
3106 return irk;
3109 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3111 struct link_key *key;
3113 key = hci_find_link_key(hdev, bdaddr);
3114 if (!key)
3115 return -ENOENT;
3117 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3119 list_del(&key->list);
3120 kfree(key);
3122 return 0;
3125 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3127 struct smp_ltk *k, *tmp;
3128 int removed = 0;
3130 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3131 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3132 continue;
3134 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3136 list_del(&k->list);
3137 kfree(k);
3138 removed++;
3141 return removed ? 0 : -ENOENT;
3144 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3146 struct smp_irk *k, *tmp;
3148 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3149 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3150 continue;
3152 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3154 list_del(&k->list);
3155 kfree(k);
3159 /* HCI command timer function */
3160 static void hci_cmd_timeout(struct work_struct *work)
3162 struct hci_dev *hdev = container_of(work, struct hci_dev,
3163 cmd_timer.work);
3165 if (hdev->sent_cmd) {
3166 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3167 u16 opcode = __le16_to_cpu(sent->opcode);
3169 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3170 } else {
3171 BT_ERR("%s command tx timeout", hdev->name);
3174 atomic_set(&hdev->cmd_cnt, 1);
3175 queue_work(hdev->workqueue, &hdev->cmd_work);
3178 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3179 bdaddr_t *bdaddr)
3181 struct oob_data *data;
3183 list_for_each_entry(data, &hdev->remote_oob_data, list)
3184 if (bacmp(bdaddr, &data->bdaddr) == 0)
3185 return data;
3187 return NULL;
3190 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3192 struct oob_data *data;
3194 data = hci_find_remote_oob_data(hdev, bdaddr);
3195 if (!data)
3196 return -ENOENT;
3198 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3200 list_del(&data->list);
3201 kfree(data);
3203 return 0;
3206 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3208 struct oob_data *data, *n;
3210 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3211 list_del(&data->list);
3212 kfree(data);
3216 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *hash, u8 *randomizer)
3219 struct oob_data *data;
3221 data = hci_find_remote_oob_data(hdev, bdaddr);
3222 if (!data) {
3223 data = kmalloc(sizeof(*data), GFP_KERNEL);
3224 if (!data)
3225 return -ENOMEM;
3227 bacpy(&data->bdaddr, bdaddr);
3228 list_add(&data->list, &hdev->remote_oob_data);
3231 memcpy(data->hash192, hash, sizeof(data->hash192));
3232 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3234 memset(data->hash256, 0, sizeof(data->hash256));
3235 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3237 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3239 return 0;
3242 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3243 u8 *hash192, u8 *randomizer192,
3244 u8 *hash256, u8 *randomizer256)
3246 struct oob_data *data;
3248 data = hci_find_remote_oob_data(hdev, bdaddr);
3249 if (!data) {
3250 data = kmalloc(sizeof(*data), GFP_KERNEL);
3251 if (!data)
3252 return -ENOMEM;
3254 bacpy(&data->bdaddr, bdaddr);
3255 list_add(&data->list, &hdev->remote_oob_data);
3258 memcpy(data->hash192, hash192, sizeof(data->hash192));
3259 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3261 memcpy(data->hash256, hash256, sizeof(data->hash256));
3262 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3264 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3266 return 0;
3269 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3270 bdaddr_t *bdaddr, u8 type)
3272 struct bdaddr_list *b;
3274 list_for_each_entry(b, &hdev->blacklist, list) {
3275 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3276 return b;
3279 return NULL;
3282 static void hci_blacklist_clear(struct hci_dev *hdev)
3284 struct list_head *p, *n;
3286 list_for_each_safe(p, n, &hdev->blacklist) {
3287 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3289 list_del(p);
3290 kfree(b);
3294 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3296 struct bdaddr_list *entry;
3298 if (!bacmp(bdaddr, BDADDR_ANY))
3299 return -EBADF;
3301 if (hci_blacklist_lookup(hdev, bdaddr, type))
3302 return -EEXIST;
3304 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3305 if (!entry)
3306 return -ENOMEM;
3308 bacpy(&entry->bdaddr, bdaddr);
3309 entry->bdaddr_type = type;
3311 list_add(&entry->list, &hdev->blacklist);
3313 return 0;
3316 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3318 struct bdaddr_list *entry;
3320 if (!bacmp(bdaddr, BDADDR_ANY)) {
3321 hci_blacklist_clear(hdev);
3322 return 0;
3325 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3326 if (!entry)
3327 return -ENOENT;
3329 list_del(&entry->list);
3330 kfree(entry);
3332 return 0;
3335 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3336 bdaddr_t *bdaddr, u8 type)
3338 struct bdaddr_list *b;
3340 list_for_each_entry(b, &hdev->le_white_list, list) {
3341 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3342 return b;
3345 return NULL;
3348 void hci_white_list_clear(struct hci_dev *hdev)
3350 struct list_head *p, *n;
3352 list_for_each_safe(p, n, &hdev->le_white_list) {
3353 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3355 list_del(p);
3356 kfree(b);
3360 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3362 struct bdaddr_list *entry;
3364 if (!bacmp(bdaddr, BDADDR_ANY))
3365 return -EBADF;
3367 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3368 if (!entry)
3369 return -ENOMEM;
3371 bacpy(&entry->bdaddr, bdaddr);
3372 entry->bdaddr_type = type;
3374 list_add(&entry->list, &hdev->le_white_list);
3376 return 0;
3379 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3381 struct bdaddr_list *entry;
3383 if (!bacmp(bdaddr, BDADDR_ANY))
3384 return -EBADF;
3386 entry = hci_white_list_lookup(hdev, bdaddr, type);
3387 if (!entry)
3388 return -ENOENT;
3390 list_del(&entry->list);
3391 kfree(entry);
3393 return 0;
3396 /* This function requires the caller holds hdev->lock */
3397 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3398 bdaddr_t *addr, u8 addr_type)
3400 struct hci_conn_params *params;
3402 list_for_each_entry(params, &hdev->le_conn_params, list) {
3403 if (bacmp(&params->addr, addr) == 0 &&
3404 params->addr_type == addr_type) {
3405 return params;
3409 return NULL;
3412 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3414 struct hci_conn *conn;
3416 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3417 if (!conn)
3418 return false;
3420 if (conn->dst_type != type)
3421 return false;
3423 if (conn->state != BT_CONNECTED)
3424 return false;
3426 return true;
3429 /* This function requires the caller holds hdev->lock */
3430 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3431 bdaddr_t *addr, u8 addr_type)
3433 struct bdaddr_list *entry;
3435 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3436 if (bacmp(&entry->bdaddr, addr) == 0 &&
3437 entry->bdaddr_type == addr_type)
3438 return entry;
3441 return NULL;
3444 /* This function requires the caller holds hdev->lock */
3445 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3447 struct bdaddr_list *entry;
3449 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3450 if (entry)
3451 goto done;
3453 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3454 if (!entry) {
3455 BT_ERR("Out of memory");
3456 return;
3459 bacpy(&entry->bdaddr, addr);
3460 entry->bdaddr_type = addr_type;
3462 list_add(&entry->list, &hdev->pend_le_conns);
3464 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3466 done:
3467 hci_update_background_scan(hdev);
3470 /* This function requires the caller holds hdev->lock */
3471 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3473 struct bdaddr_list *entry;
3475 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3476 if (!entry)
3477 goto done;
3479 list_del(&entry->list);
3480 kfree(entry);
3482 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3484 done:
3485 hci_update_background_scan(hdev);
3488 /* This function requires the caller holds hdev->lock */
3489 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3491 struct bdaddr_list *entry, *tmp;
3493 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3494 list_del(&entry->list);
3495 kfree(entry);
3498 BT_DBG("All LE pending connections cleared");
3500 hci_update_background_scan(hdev);
3503 /* This function requires the caller holds hdev->lock */
3504 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3505 bdaddr_t *addr, u8 addr_type)
3507 struct hci_conn_params *params;
3509 if (!hci_is_identity_address(addr, addr_type))
3510 return NULL;
3512 params = hci_conn_params_lookup(hdev, addr, addr_type);
3513 if (params)
3514 return params;
3516 params = kzalloc(sizeof(*params), GFP_KERNEL);
3517 if (!params) {
3518 BT_ERR("Out of memory");
3519 return NULL;
3522 bacpy(&params->addr, addr);
3523 params->addr_type = addr_type;
3525 list_add(&params->list, &hdev->le_conn_params);
3527 params->conn_min_interval = hdev->le_conn_min_interval;
3528 params->conn_max_interval = hdev->le_conn_max_interval;
3529 params->conn_latency = hdev->le_conn_latency;
3530 params->supervision_timeout = hdev->le_supv_timeout;
3531 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3533 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3535 return params;
3538 /* This function requires the caller holds hdev->lock */
3539 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3540 u8 auto_connect)
3542 struct hci_conn_params *params;
3544 params = hci_conn_params_add(hdev, addr, addr_type);
3545 if (!params)
3546 return -EIO;
3548 params->auto_connect = auto_connect;
3550 switch (auto_connect) {
3551 case HCI_AUTO_CONN_DISABLED:
3552 case HCI_AUTO_CONN_REPORT:
3553 case HCI_AUTO_CONN_LINK_LOSS:
3554 hci_pend_le_conn_del(hdev, addr, addr_type);
3555 break;
3556 case HCI_AUTO_CONN_ALWAYS:
3557 if (!is_connected(hdev, addr, addr_type))
3558 hci_pend_le_conn_add(hdev, addr, addr_type);
3559 break;
3562 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3563 auto_connect);
3565 return 0;
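/* Editorial sketch (not part of the original source): marking a peer for
 * automatic connection with hci_conn_params_set(). The function requires
 * hdev->lock, so a caller outside this file would take it first. The
 * example_* name is a placeholder.
 */
static int example_enable_auto_connect(struct hci_dev *hdev, bdaddr_t *addr,
				       u8 addr_type)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, addr_type,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}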
3568 /* This function requires the caller holds hdev->lock */
3569 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3571 struct hci_conn_params *params;
3573 params = hci_conn_params_lookup(hdev, addr, addr_type);
3574 if (!params)
3575 return;
3577 hci_pend_le_conn_del(hdev, addr, addr_type);
3579 list_del(&params->list);
3580 kfree(params);
3582 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3585 /* This function requires the caller holds hdev->lock */
3586 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3588 struct hci_conn_params *params, *tmp;
3590 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3591 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3592 continue;
3593 list_del(&params->list);
3594 kfree(params);
3597 BT_DBG("All LE disabled connection parameters were removed");
3600 /* This function requires the caller holds hdev->lock */
3601 void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3603 struct hci_conn_params *params, *tmp;
3605 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3606 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3607 continue;
3608 list_del(&params->list);
3609 kfree(params);
3612 hci_pend_le_conns_clear(hdev);
3614 BT_DBG("All enabled LE connection parameters were removed");
3617 /* This function requires the caller holds hdev->lock */
3618 void hci_conn_params_clear_all(struct hci_dev *hdev)
3620 struct hci_conn_params *params, *tmp;
3622 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3623 list_del(&params->list);
3624 kfree(params);
3627 hci_pend_le_conns_clear(hdev);
3629 BT_DBG("All LE connection parameters were removed");
3632 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3634 if (status) {
3635 BT_ERR("Failed to start inquiry: status %d", status);
3637 hci_dev_lock(hdev);
3638 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3639 hci_dev_unlock(hdev);
3640 return;
3644 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3646 /* General inquiry access code (GIAC) */
3647 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3648 struct hci_request req;
3649 struct hci_cp_inquiry cp;
3650 int err;
3652 if (status) {
3653 BT_ERR("Failed to disable LE scanning: status %d", status);
3654 return;
3657 switch (hdev->discovery.type) {
3658 case DISCOV_TYPE_LE:
3659 hci_dev_lock(hdev);
3660 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3661 hci_dev_unlock(hdev);
3662 break;
3664 case DISCOV_TYPE_INTERLEAVED:
3665 hci_req_init(&req, hdev);
3667 memset(&cp, 0, sizeof(cp));
3668 memcpy(&cp.lap, lap, sizeof(cp.lap));
3669 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3670 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3672 hci_dev_lock(hdev);
3674 hci_inquiry_cache_flush(hdev);
3676 err = hci_req_run(&req, inquiry_complete);
3677 if (err) {
3678 BT_ERR("Inquiry request failed: err %d", err);
3679 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3682 hci_dev_unlock(hdev);
3683 break;
3687 static void le_scan_disable_work(struct work_struct *work)
3689 struct hci_dev *hdev = container_of(work, struct hci_dev,
3690 le_scan_disable.work);
3691 struct hci_request req;
3692 int err;
3694 BT_DBG("%s", hdev->name);
3696 hci_req_init(&req, hdev);
3698 hci_req_add_le_scan_disable(&req);
3700 err = hci_req_run(&req, le_scan_disable_work_complete);
3701 if (err)
3702 BT_ERR("Disable LE scanning request failed: err %d", err);
3705 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3707 struct hci_dev *hdev = req->hdev;
3709 /* If we're advertising or initiating an LE connection we can't
3710 * go ahead and change the random address at this time. This is
3711 * because the eventual initiator address used for the
3712 * subsequently created connection will be undefined (some
3713 * controllers use the new address and others the one we had
3714 * when the operation started).
3716 * In this kind of scenario skip the update and let the random
3717 * address be updated at the next cycle.
3719 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3720 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3721 BT_DBG("Deferring random address update");
3722 return;
3725 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3728 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3729 u8 *own_addr_type)
3731 struct hci_dev *hdev = req->hdev;
3732 int err;
3734 /* If privacy is enabled use a resolvable private address. If
3735 * the current RPA has expired or there is something other than
3736 * the current RPA in use, then generate a new one.
3738 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3739 int to;
3741 *own_addr_type = ADDR_LE_DEV_RANDOM;
3743 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3744 !bacmp(&hdev->random_addr, &hdev->rpa))
3745 return 0;
3747 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3748 if (err < 0) {
3749 BT_ERR("%s failed to generate new RPA", hdev->name);
3750 return err;
3753 set_random_addr(req, &hdev->rpa);
3755 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3756 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3758 return 0;
3761 /* In case of required privacy without resolvable private address,
3762 * use an unresolvable private address. This is useful for active
3763 * scanning and non-connectable advertising.
3765 if (require_privacy) {
3766 bdaddr_t urpa;
3768 get_random_bytes(&urpa, 6);
3769 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3771 *own_addr_type = ADDR_LE_DEV_RANDOM;
3772 set_random_addr(req, &urpa);
3773 return 0;
3776 /* If forcing static address is in use or there is no public
3777 * address use the static address as random address (but skip
3778 * the HCI command if the current random address is already the
3779 * static one).
3781 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3782 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3783 *own_addr_type = ADDR_LE_DEV_RANDOM;
3784 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3785 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3786 &hdev->static_addr);
3787 return 0;
3790 /* Neither privacy nor static address is being used so use a
3791 * public address.
3793 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3795 return 0;
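/* Editorial sketch (not part of the original source): how a request builder
 * would typically use hci_update_random_address() to pick the own address
 * type before queueing an LE command. The example_* name is a placeholder;
 * the scan parameters mirror the defaults set in hci_alloc_dev() below.
 */
static void example_add_le_scan_param(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param cp;
	u8 own_addr_type;

	/* Ask for privacy; with require_privacy set, the helper falls back
	 * to an unresolvable private address when HCI_PRIVACY is not
	 * enabled.
	 */
	if (hci_update_random_address(req, true, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_SCAN_ACTIVE;
	cp.interval = cpu_to_le16(req->hdev->le_scan_interval);
	cp.window = cpu_to_le16(req->hdev->le_scan_window);
	cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}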
3798 /* Copy the Identity Address of the controller.
3800 * If the controller has a public BD_ADDR, then by default use that one.
3801 * If this is an LE-only controller without a public address, default to
3802 * the static random address.
3804 * For debugging purposes it is possible to force controllers with a
3805 * public address to use the static random address instead.
3807 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3808 u8 *bdaddr_type)
3810 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3811 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3812 bacpy(bdaddr, &hdev->static_addr);
3813 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3814 } else {
3815 bacpy(bdaddr, &hdev->bdaddr);
3816 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3820 /* Alloc HCI device */
3821 struct hci_dev *hci_alloc_dev(void)
3823 struct hci_dev *hdev;
3825 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3826 if (!hdev)
3827 return NULL;
3829 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3830 hdev->esco_type = (ESCO_HV1);
3831 hdev->link_mode = (HCI_LM_ACCEPT);
3832 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3833 hdev->io_capability = 0x03; /* No Input No Output */
3834 hdev->manufacturer = 0xffff; /* Default to internal use */
3835 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3836 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3838 hdev->sniff_max_interval = 800;
3839 hdev->sniff_min_interval = 80;
3841 hdev->le_adv_channel_map = 0x07;
3842 hdev->le_scan_interval = 0x0060;
3843 hdev->le_scan_window = 0x0030;
3844 hdev->le_conn_min_interval = 0x0028;
3845 hdev->le_conn_max_interval = 0x0038;
3846 hdev->le_conn_latency = 0x0000;
3847 hdev->le_supv_timeout = 0x002a;
3849 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3850 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3851 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3852 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3854 mutex_init(&hdev->lock);
3855 mutex_init(&hdev->req_lock);
3857 INIT_LIST_HEAD(&hdev->mgmt_pending);
3858 INIT_LIST_HEAD(&hdev->blacklist);
3859 INIT_LIST_HEAD(&hdev->uuids);
3860 INIT_LIST_HEAD(&hdev->link_keys);
3861 INIT_LIST_HEAD(&hdev->long_term_keys);
3862 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3863 INIT_LIST_HEAD(&hdev->remote_oob_data);
3864 INIT_LIST_HEAD(&hdev->le_white_list);
3865 INIT_LIST_HEAD(&hdev->le_conn_params);
3866 INIT_LIST_HEAD(&hdev->pend_le_conns);
3867 INIT_LIST_HEAD(&hdev->conn_hash.list);
3869 INIT_WORK(&hdev->rx_work, hci_rx_work);
3870 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3871 INIT_WORK(&hdev->tx_work, hci_tx_work);
3872 INIT_WORK(&hdev->power_on, hci_power_on);
3874 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3875 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3876 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3878 skb_queue_head_init(&hdev->rx_q);
3879 skb_queue_head_init(&hdev->cmd_q);
3880 skb_queue_head_init(&hdev->raw_q);
3882 init_waitqueue_head(&hdev->req_wait_q);
3884 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3886 hci_init_sysfs(hdev);
3887 discovery_init(hdev);
3889 return hdev;
3891 EXPORT_SYMBOL(hci_alloc_dev);
3893 /* Free HCI device */
3894 void hci_free_dev(struct hci_dev *hdev)
3896 /* will free via device release */
3897 put_device(&hdev->dev);
3899 EXPORT_SYMBOL(hci_free_dev);
3901 /* Register HCI device */
3902 int hci_register_dev(struct hci_dev *hdev)
3904 int id, error;
3906 if (!hdev->open || !hdev->close)
3907 return -EINVAL;
3909 /* Do not allow HCI_AMP devices to register at index 0,
3910 * so the index can be used as the AMP controller ID.
3912 switch (hdev->dev_type) {
3913 case HCI_BREDR:
3914 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3915 break;
3916 case HCI_AMP:
3917 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3918 break;
3919 default:
3920 return -EINVAL;
3923 if (id < 0)
3924 return id;
3926 sprintf(hdev->name, "hci%d", id);
3927 hdev->id = id;
3929 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3931 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3932 WQ_MEM_RECLAIM, 1, hdev->name);
3933 if (!hdev->workqueue) {
3934 error = -ENOMEM;
3935 goto err;
3938 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3939 WQ_MEM_RECLAIM, 1, hdev->name);
3940 if (!hdev->req_workqueue) {
3941 destroy_workqueue(hdev->workqueue);
3942 error = -ENOMEM;
3943 goto err;
3946 if (!IS_ERR_OR_NULL(bt_debugfs))
3947 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3949 dev_set_name(&hdev->dev, "%s", hdev->name);
3951 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3952 CRYPTO_ALG_ASYNC);
3953 if (IS_ERR(hdev->tfm_aes)) {
3954 BT_ERR("Unable to create crypto context");
3955 error = PTR_ERR(hdev->tfm_aes);
3956 hdev->tfm_aes = NULL;
3957 goto err_wqueue;
3960 error = device_add(&hdev->dev);
3961 if (error < 0)
3962 goto err_tfm;
3964 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3965 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3966 hdev);
3967 if (hdev->rfkill) {
3968 if (rfkill_register(hdev->rfkill) < 0) {
3969 rfkill_destroy(hdev->rfkill);
3970 hdev->rfkill = NULL;
3974 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3975 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3977 set_bit(HCI_SETUP, &hdev->dev_flags);
3978 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3980 if (hdev->dev_type == HCI_BREDR) {
3981 /* Assume BR/EDR support until proven otherwise (such as
3982 * through reading supported features during init).
3984 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3987 write_lock(&hci_dev_list_lock);
3988 list_add(&hdev->list, &hci_dev_list);
3989 write_unlock(&hci_dev_list_lock);
3991 /* Devices that are marked for raw-only usage are unconfigured
3992 * and should not be included in normal operation.
3994 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3995 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3997 hci_notify(hdev, HCI_DEV_REG);
3998 hci_dev_hold(hdev);
4000 queue_work(hdev->req_workqueue, &hdev->power_on);
4002 return id;
4004 err_tfm:
4005 crypto_free_blkcipher(hdev->tfm_aes);
4006 err_wqueue:
4007 destroy_workqueue(hdev->workqueue);
4008 destroy_workqueue(hdev->req_workqueue);
4009 err:
4010 ida_simple_remove(&hci_index_ida, hdev->id);
4012 return error;
4014 EXPORT_SYMBOL(hci_register_dev);
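/* Editorial sketch (not part of the original source): the minimal sequence a
 * transport driver follows to register a controller with the core. The
 * example_* callbacks are placeholders; a real driver performs transport
 * specific setup and TX in them, and the choice of HCI_USB as bus type is
 * only illustrative.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here. */
	kfree_skb(skb);
	return 0;
}

static int example_register_controller(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}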
4016 /* Unregister HCI device */
4017 void hci_unregister_dev(struct hci_dev *hdev)
4019 int i, id;
4021 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4023 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4025 id = hdev->id;
4027 write_lock(&hci_dev_list_lock);
4028 list_del(&hdev->list);
4029 write_unlock(&hci_dev_list_lock);
4031 hci_dev_do_close(hdev);
4033 for (i = 0; i < NUM_REASSEMBLY; i++)
4034 kfree_skb(hdev->reassembly[i]);
4036 cancel_work_sync(&hdev->power_on);
4038 if (!test_bit(HCI_INIT, &hdev->flags) &&
4039 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4040 hci_dev_lock(hdev);
4041 mgmt_index_removed(hdev);
4042 hci_dev_unlock(hdev);
4045 /* mgmt_index_removed should take care of emptying the
4046 * pending list */
4047 BUG_ON(!list_empty(&hdev->mgmt_pending));
4049 hci_notify(hdev, HCI_DEV_UNREG);
4051 if (hdev->rfkill) {
4052 rfkill_unregister(hdev->rfkill);
4053 rfkill_destroy(hdev->rfkill);
4056 if (hdev->tfm_aes)
4057 crypto_free_blkcipher(hdev->tfm_aes);
4059 device_del(&hdev->dev);
4061 debugfs_remove_recursive(hdev->debugfs);
4063 destroy_workqueue(hdev->workqueue);
4064 destroy_workqueue(hdev->req_workqueue);
4066 hci_dev_lock(hdev);
4067 hci_blacklist_clear(hdev);
4068 hci_uuids_clear(hdev);
4069 hci_link_keys_clear(hdev);
4070 hci_smp_ltks_clear(hdev);
4071 hci_smp_irks_clear(hdev);
4072 hci_remote_oob_data_clear(hdev);
4073 hci_white_list_clear(hdev);
4074 hci_conn_params_clear_all(hdev);
4075 hci_dev_unlock(hdev);
4077 hci_dev_put(hdev);
4079 ida_simple_remove(&hci_index_ida, id);
4081 EXPORT_SYMBOL(hci_unregister_dev);
4083 /* Suspend HCI device */
4084 int hci_suspend_dev(struct hci_dev *hdev)
4086 hci_notify(hdev, HCI_DEV_SUSPEND);
4087 return 0;
4089 EXPORT_SYMBOL(hci_suspend_dev);
4091 /* Resume HCI device */
4092 int hci_resume_dev(struct hci_dev *hdev)
4094 hci_notify(hdev, HCI_DEV_RESUME);
4095 return 0;
4097 EXPORT_SYMBOL(hci_resume_dev);
4099 /* Receive frame from HCI drivers */
4100 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4102 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4103 && !test_bit(HCI_INIT, &hdev->flags))) {
4104 kfree_skb(skb);
4105 return -ENXIO;
4108 /* Incoming skb */
4109 bt_cb(skb)->incoming = 1;
4111 /* Time stamp */
4112 __net_timestamp(skb);
4114 skb_queue_tail(&hdev->rx_q, skb);
4115 queue_work(hdev->workqueue, &hdev->rx_work);
4117 return 0;
4119 EXPORT_SYMBOL(hci_recv_frame);
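/* Editorial sketch (not part of the original source): how a driver hands a
 * complete packet to the core. The packet type must be set in the skb
 * control block before calling hci_recv_frame(); example_* is a placeholder
 * name.
 */
static int example_deliver_event(struct hci_dev *hdev, void *data, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}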
4121 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4122 int count, __u8 index)
4124 int len = 0;
4125 int hlen = 0;
4126 int remain = count;
4127 struct sk_buff *skb;
4128 struct bt_skb_cb *scb;
4130 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4131 index >= NUM_REASSEMBLY)
4132 return -EILSEQ;
4134 skb = hdev->reassembly[index];
4136 if (!skb) {
4137 switch (type) {
4138 case HCI_ACLDATA_PKT:
4139 len = HCI_MAX_FRAME_SIZE;
4140 hlen = HCI_ACL_HDR_SIZE;
4141 break;
4142 case HCI_EVENT_PKT:
4143 len = HCI_MAX_EVENT_SIZE;
4144 hlen = HCI_EVENT_HDR_SIZE;
4145 break;
4146 case HCI_SCODATA_PKT:
4147 len = HCI_MAX_SCO_SIZE;
4148 hlen = HCI_SCO_HDR_SIZE;
4149 break;
4152 skb = bt_skb_alloc(len, GFP_ATOMIC);
4153 if (!skb)
4154 return -ENOMEM;
4156 scb = (void *) skb->cb;
4157 scb->expect = hlen;
4158 scb->pkt_type = type;
4160 hdev->reassembly[index] = skb;
4163 while (count) {
4164 scb = (void *) skb->cb;
4165 len = min_t(uint, scb->expect, count);
4167 memcpy(skb_put(skb, len), data, len);
4169 count -= len;
4170 data += len;
4171 scb->expect -= len;
4172 remain = count;
4174 switch (type) {
4175 case HCI_EVENT_PKT:
4176 if (skb->len == HCI_EVENT_HDR_SIZE) {
4177 struct hci_event_hdr *h = hci_event_hdr(skb);
4178 scb->expect = h->plen;
4180 if (skb_tailroom(skb) < scb->expect) {
4181 kfree_skb(skb);
4182 hdev->reassembly[index] = NULL;
4183 return -ENOMEM;
4186 break;
4188 case HCI_ACLDATA_PKT:
4189 if (skb->len == HCI_ACL_HDR_SIZE) {
4190 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4191 scb->expect = __le16_to_cpu(h->dlen);
4193 if (skb_tailroom(skb) < scb->expect) {
4194 kfree_skb(skb);
4195 hdev->reassembly[index] = NULL;
4196 return -ENOMEM;
4199 break;
4201 case HCI_SCODATA_PKT:
4202 if (skb->len == HCI_SCO_HDR_SIZE) {
4203 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4204 scb->expect = h->dlen;
4206 if (skb_tailroom(skb) < scb->expect) {
4207 kfree_skb(skb);
4208 hdev->reassembly[index] = NULL;
4209 return -ENOMEM;
4212 break;
4215 if (scb->expect == 0) {
4216 /* Complete frame */
4218 bt_cb(skb)->pkt_type = type;
4219 hci_recv_frame(hdev, skb);
4221 hdev->reassembly[index] = NULL;
4222 return remain;
4226 return remain;
4229 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4231 int rem = 0;
4233 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4234 return -EILSEQ;
4236 while (count) {
4237 rem = hci_reassembly(hdev, type, data, count, type - 1);
4238 if (rem < 0)
4239 return rem;
4241 data += (count - rem);
4242 count = rem;
4245 return rem;
4247 EXPORT_SYMBOL(hci_recv_fragment);
4249 #define STREAM_REASSEMBLY 0
4251 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4253 int type;
4254 int rem = 0;
4256 while (count) {
4257 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4259 if (!skb) {
4260 struct { char type; } *pkt;
4262 /* Start of the frame */
4263 pkt = data;
4264 type = pkt->type;
4266 data++;
4267 count--;
4268 } else
4269 type = bt_cb(skb)->pkt_type;
4271 rem = hci_reassembly(hdev, type, data, count,
4272 STREAM_REASSEMBLY);
4273 if (rem < 0)
4274 return rem;
4276 data += (count - rem);
4277 count = rem;
4280 return rem;
4282 EXPORT_SYMBOL(hci_recv_stream_fragment);
4284 /* ---- Interface to upper protocols ---- */
4286 int hci_register_cb(struct hci_cb *cb)
4288 BT_DBG("%p name %s", cb, cb->name);
4290 write_lock(&hci_cb_list_lock);
4291 list_add(&cb->list, &hci_cb_list);
4292 write_unlock(&hci_cb_list_lock);
4294 return 0;
4296 EXPORT_SYMBOL(hci_register_cb);
4298 int hci_unregister_cb(struct hci_cb *cb)
4300 BT_DBG("%p name %s", cb, cb->name);
4302 write_lock(&hci_cb_list_lock);
4303 list_del(&cb->list);
4304 write_unlock(&hci_cb_list_lock);
4306 return 0;
4308 EXPORT_SYMBOL(hci_unregister_cb);
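/* Editorial sketch (not part of the original source): an upper protocol
 * registers a struct hci_cb to receive connection-level callbacks. Only the
 * callbacks of interest need to be filled in; the example_* names are
 * placeholders.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* Paired calls: hci_register_cb(&example_cb) at module init and
 * hci_unregister_cb(&example_cb) on exit.
 */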
4310 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4312 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4314 /* Time stamp */
4315 __net_timestamp(skb);
4317 /* Send copy to monitor */
4318 hci_send_to_monitor(hdev, skb);
4320 if (atomic_read(&hdev->promisc)) {
4321 /* Send copy to the sockets */
4322 hci_send_to_sock(hdev, skb);
4325 /* Get rid of skb owner, prior to sending to the driver. */
4326 skb_orphan(skb);
4328 if (hdev->send(hdev, skb) < 0)
4329 BT_ERR("%s sending frame failed", hdev->name);
4332 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4334 skb_queue_head_init(&req->cmd_q);
4335 req->hdev = hdev;
4336 req->err = 0;
4339 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4341 struct hci_dev *hdev = req->hdev;
4342 struct sk_buff *skb;
4343 unsigned long flags;
4345 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4347 /* If an error occurred during request building, remove all HCI
4348 * commands queued on the HCI request queue.
4350 if (req->err) {
4351 skb_queue_purge(&req->cmd_q);
4352 return req->err;
4355 /* Do not allow empty requests */
4356 if (skb_queue_empty(&req->cmd_q))
4357 return -ENODATA;
4359 skb = skb_peek_tail(&req->cmd_q);
4360 bt_cb(skb)->req.complete = complete;
4362 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4363 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4364 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4366 queue_work(hdev->workqueue, &hdev->cmd_work);
4368 return 0;
4371 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4372 u32 plen, const void *param)
4374 int len = HCI_COMMAND_HDR_SIZE + plen;
4375 struct hci_command_hdr *hdr;
4376 struct sk_buff *skb;
4378 skb = bt_skb_alloc(len, GFP_ATOMIC);
4379 if (!skb)
4380 return NULL;
4382 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4383 hdr->opcode = cpu_to_le16(opcode);
4384 hdr->plen = plen;
4386 if (plen)
4387 memcpy(skb_put(skb, plen), param, plen);
4389 BT_DBG("skb len %d", skb->len);
4391 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4393 return skb;
4396 /* Send HCI command */
4397 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4398 const void *param)
4400 struct sk_buff *skb;
4402 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4404 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4405 if (!skb) {
4406 BT_ERR("%s no memory for command", hdev->name);
4407 return -ENOMEM;
4410 /* Stand-alone HCI commands must be flagged as
4411 * single-command requests.
4413 bt_cb(skb)->req.start = true;
4415 skb_queue_tail(&hdev->cmd_q, skb);
4416 queue_work(hdev->workqueue, &hdev->cmd_work);
4418 return 0;
4421 /* Queue a command to an asynchronous HCI request */
4422 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4423 const void *param, u8 event)
4425 struct hci_dev *hdev = req->hdev;
4426 struct sk_buff *skb;
4428 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4430 /* If an error occurred during request building, there is no point in
4431 * queueing the HCI command. We can simply return.
4433 if (req->err)
4434 return;
4436 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4437 if (!skb) {
4438 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4439 hdev->name, opcode);
4440 req->err = -ENOMEM;
4441 return;
4444 if (skb_queue_empty(&req->cmd_q))
4445 bt_cb(skb)->req.start = true;
4447 bt_cb(skb)->req.event = event;
4449 skb_queue_tail(&req->cmd_q, skb);
4452 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4453 const void *param)
4455 hci_req_add_ev(req, opcode, plen, param, 0);
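/* Editorial sketch (not part of the original source): the typical pattern
 * for using the request helpers above. The example_* names and the choice
 * of HCI_OP_READ_LOCAL_VERSION are placeholders for illustration only.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static void example_send_read_local_version(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	if (hci_req_run(&req, example_req_complete) < 0)
		BT_ERR("%s failed to run example request", hdev->name);
}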
4458 /* Get data from the previously sent command */
4459 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4461 struct hci_command_hdr *hdr;
4463 if (!hdev->sent_cmd)
4464 return NULL;
4466 hdr = (void *) hdev->sent_cmd->data;
4468 if (hdr->opcode != cpu_to_le16(opcode))
4469 return NULL;
4471 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4473 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4476 /* Send ACL data */
4477 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4479 struct hci_acl_hdr *hdr;
4480 int len = skb->len;
4482 skb_push(skb, HCI_ACL_HDR_SIZE);
4483 skb_reset_transport_header(skb);
4484 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4485 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4486 hdr->dlen = cpu_to_le16(len);
4489 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4490 struct sk_buff *skb, __u16 flags)
4492 struct hci_conn *conn = chan->conn;
4493 struct hci_dev *hdev = conn->hdev;
4494 struct sk_buff *list;
4496 skb->len = skb_headlen(skb);
4497 skb->data_len = 0;
4499 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4501 switch (hdev->dev_type) {
4502 case HCI_BREDR:
4503 hci_add_acl_hdr(skb, conn->handle, flags);
4504 break;
4505 case HCI_AMP:
4506 hci_add_acl_hdr(skb, chan->handle, flags);
4507 break;
4508 default:
4509 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4510 return;
4513 list = skb_shinfo(skb)->frag_list;
4514 if (!list) {
4515 /* Non fragmented */
4516 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4518 skb_queue_tail(queue, skb);
4519 } else {
4520 /* Fragmented */
4521 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4523 skb_shinfo(skb)->frag_list = NULL;
4525 /* Queue all fragments atomically */
4526 spin_lock(&queue->lock);
4528 __skb_queue_tail(queue, skb);
4530 flags &= ~ACL_START;
4531 flags |= ACL_CONT;
4532 do {
4533 skb = list; list = list->next;
4535 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4536 hci_add_acl_hdr(skb, conn->handle, flags);
4538 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4540 __skb_queue_tail(queue, skb);
4541 } while (list);
4543 spin_unlock(&queue->lock);
4547 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4549 struct hci_dev *hdev = chan->conn->hdev;
4551 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4553 hci_queue_acl(chan, &chan->data_q, skb, flags);
4555 queue_work(hdev->workqueue, &hdev->tx_work);
4558 /* Send SCO data */
4559 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4561 struct hci_dev *hdev = conn->hdev;
4562 struct hci_sco_hdr hdr;
4564 BT_DBG("%s len %d", hdev->name, skb->len);
4566 hdr.handle = cpu_to_le16(conn->handle);
4567 hdr.dlen = skb->len;
4569 skb_push(skb, HCI_SCO_HDR_SIZE);
4570 skb_reset_transport_header(skb);
4571 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4573 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4575 skb_queue_tail(&conn->data_q, skb);
4576 queue_work(hdev->workqueue, &hdev->tx_work);
4579 /* ---- HCI TX task (outgoing data) ---- */
4581 /* HCI Connection scheduler */
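/* hci_low_sent() picks, among connections of the given link type that are
 * in BT_CONNECTED or BT_CONFIG state and have data queued, the one with
 * the fewest packets currently in flight. *quote is set to that
 * connection's fair share of the free controller buffers, with a minimum
 * of one.
 */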
4582 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4583 int *quote)
4585 struct hci_conn_hash *h = &hdev->conn_hash;
4586 struct hci_conn *conn = NULL, *c;
4587 unsigned int num = 0, min = ~0;
4589 /* We don't have to lock the device here. Connections are always
4590 * added and removed with the TX task disabled. */
4592 rcu_read_lock();
4594 list_for_each_entry_rcu(c, &h->list, list) {
4595 if (c->type != type || skb_queue_empty(&c->data_q))
4596 continue;
4598 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4599 continue;
4601 num++;
4603 if (c->sent < min) {
4604 min = c->sent;
4605 conn = c;
4608 if (hci_conn_num(hdev, type) == num)
4609 break;
4612 rcu_read_unlock();
4614 if (conn) {
4615 int cnt, q;
4617 switch (conn->type) {
4618 case ACL_LINK:
4619 cnt = hdev->acl_cnt;
4620 break;
4621 case SCO_LINK:
4622 case ESCO_LINK:
4623 cnt = hdev->sco_cnt;
4624 break;
4625 case LE_LINK:
4626 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4627 break;
4628 default:
4629 cnt = 0;
4630 BT_ERR("Unknown link type");
4633 q = cnt / num;
4634 *quote = q ? q : 1;
4635 } else
4636 *quote = 0;
4638 BT_DBG("conn %p quote %d", conn, *quote);
4639 return conn;
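/* TX timeout handling: forcibly disconnect every connection of the given
 * type that still has unacknowledged packets, so the controller buffers
 * they hold can be reclaimed.
 */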
4642 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4644 struct hci_conn_hash *h = &hdev->conn_hash;
4645 struct hci_conn *c;
4647 BT_ERR("%s link tx timeout", hdev->name);
4649 rcu_read_lock();
4651 /* Kill stalled connections */
4652 list_for_each_entry_rcu(c, &h->list, list) {
4653 if (c->type == type && c->sent) {
4654 BT_ERR("%s killing stalled connection %pMR",
4655 hdev->name, &c->dst);
4656 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4660 rcu_read_unlock();
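/* Channel-level counterpart of hci_low_sent(): only channels whose head
 * skb has the highest priority seen in this pass are considered, and among
 * those the channel belonging to the connection with the fewest packets in
 * flight wins. *quote is the per-channel share of the matching buffer pool
 * (data blocks for AMP links), again with a minimum of one.
 */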
4663 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4664 int *quote)
4666 struct hci_conn_hash *h = &hdev->conn_hash;
4667 struct hci_chan *chan = NULL;
4668 unsigned int num = 0, min = ~0, cur_prio = 0;
4669 struct hci_conn *conn;
4670 int cnt, q, conn_num = 0;
4672 BT_DBG("%s", hdev->name);
4674 rcu_read_lock();
4676 list_for_each_entry_rcu(conn, &h->list, list) {
4677 struct hci_chan *tmp;
4679 if (conn->type != type)
4680 continue;
4682 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4683 continue;
4685 conn_num++;
4687 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4688 struct sk_buff *skb;
4690 if (skb_queue_empty(&tmp->data_q))
4691 continue;
4693 skb = skb_peek(&tmp->data_q);
4694 if (skb->priority < cur_prio)
4695 continue;
4697 if (skb->priority > cur_prio) {
4698 num = 0;
4699 min = ~0;
4700 cur_prio = skb->priority;
4703 num++;
4705 if (conn->sent < min) {
4706 min = conn->sent;
4707 chan = tmp;
4711 if (hci_conn_num(hdev, type) == conn_num)
4712 break;
4715 rcu_read_unlock();
4717 if (!chan)
4718 return NULL;
4720 switch (chan->conn->type) {
4721 case ACL_LINK:
4722 cnt = hdev->acl_cnt;
4723 break;
4724 case AMP_LINK:
4725 cnt = hdev->block_cnt;
4726 break;
4727 case SCO_LINK:
4728 case ESCO_LINK:
4729 cnt = hdev->sco_cnt;
4730 break;
4731 case LE_LINK:
4732 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4733 break;
4734 default:
4735 cnt = 0;
4736 BT_ERR("Unknown link type");
4739 q = cnt / num;
4740 *quote = q ? q : 1;
4741 BT_DBG("chan %p quote %d", chan, *quote);
4742 return chan;
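/* After a scheduling round, reset the per-round chan->sent counters and
 * promote the skb at the head of every channel that did not get to send
 * to HCI_PRIO_MAX - 1, so lower-priority channels cannot be starved
 * indefinitely.
 */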
4745 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4747 struct hci_conn_hash *h = &hdev->conn_hash;
4748 struct hci_conn *conn;
4749 int num = 0;
4751 BT_DBG("%s", hdev->name);
4753 rcu_read_lock();
4755 list_for_each_entry_rcu(conn, &h->list, list) {
4756 struct hci_chan *chan;
4758 if (conn->type != type)
4759 continue;
4761 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4762 continue;
4764 num++;
4766 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4767 struct sk_buff *skb;
4769 if (chan->sent) {
4770 chan->sent = 0;
4771 continue;
4774 if (skb_queue_empty(&chan->data_q))
4775 continue;
4777 skb = skb_peek(&chan->data_q);
4778 if (skb->priority >= HCI_PRIO_MAX - 1)
4779 continue;
4781 skb->priority = HCI_PRIO_MAX - 1;
4783 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4784 skb->priority);
4787 if (hci_conn_num(hdev, type) == num)
4788 break;
4791 rcu_read_unlock();
4795 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4797 /* Calculate count of blocks used by this packet */
4798 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4801 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4803 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4804 /* ACL tx timeout must be longer than maximum
4805 * link supervision timeout (40.9 seconds) */
4806 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4807 HCI_ACL_TX_TIMEOUT))
4808 hci_link_tx_to(hdev, ACL_LINK);
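/* Packet-based ACL scheduler: while ACL buffer credits remain, drain up to
 * 'quote' frames from each selected channel, stopping early if the head of
 * the queue drops below the priority the pass started with, and trigger a
 * priority recalculation if anything was sent.
 */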
4812 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4814 unsigned int cnt = hdev->acl_cnt;
4815 struct hci_chan *chan;
4816 struct sk_buff *skb;
4817 int quote;
4819 __check_timeout(hdev, cnt);
4821 while (hdev->acl_cnt &&
4822 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4823 u32 priority = (skb_peek(&chan->data_q))->priority;
4824 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4825 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4826 skb->len, skb->priority);
4828 /* Stop if priority has changed */
4829 if (skb->priority < priority)
4830 break;
4832 skb = skb_dequeue(&chan->data_q);
4834 hci_conn_enter_active_mode(chan->conn,
4835 bt_cb(skb)->force_active);
4837 hci_send_frame(hdev, skb);
4838 hdev->acl_last_tx = jiffies;
4840 hdev->acl_cnt--;
4841 chan->sent++;
4842 chan->conn->sent++;
4846 if (cnt != hdev->acl_cnt)
4847 hci_prio_recalculate(hdev, ACL_LINK);
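/* Block-based variant used with HCI_FLOW_CTL_MODE_BLOCK_BASED flow
 * control: every frame consumes __get_blocks() data blocks from
 * hdev->block_cnt rather than a single packet credit, and on AMP
 * controllers the scheduling runs over AMP_LINK connections.
 */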
4850 static void hci_sched_acl_blk(struct hci_dev *hdev)
4852 unsigned int cnt = hdev->block_cnt;
4853 struct hci_chan *chan;
4854 struct sk_buff *skb;
4855 int quote;
4856 u8 type;
4858 __check_timeout(hdev, cnt);
4860 BT_DBG("%s", hdev->name);
4862 if (hdev->dev_type == HCI_AMP)
4863 type = AMP_LINK;
4864 else
4865 type = ACL_LINK;
4867 while (hdev->block_cnt > 0 &&
4868 (chan = hci_chan_sent(hdev, type, &quote))) {
4869 u32 priority = (skb_peek(&chan->data_q))->priority;
4870 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4871 int blocks;
4873 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4874 skb->len, skb->priority);
4876 /* Stop if priority has changed */
4877 if (skb->priority < priority)
4878 break;
4880 skb = skb_dequeue(&chan->data_q);
4882 blocks = __get_blocks(hdev, skb);
4883 if (blocks > hdev->block_cnt)
4884 return;
4886 hci_conn_enter_active_mode(chan->conn,
4887 bt_cb(skb)->force_active);
4889 hci_send_frame(hdev, skb);
4890 hdev->acl_last_tx = jiffies;
4892 hdev->block_cnt -= blocks;
4893 quote -= blocks;
4895 chan->sent += blocks;
4896 chan->conn->sent += blocks;
4900 if (cnt != hdev->block_cnt)
4901 hci_prio_recalculate(hdev, type);
4904 static void hci_sched_acl(struct hci_dev *hdev)
4906 BT_DBG("%s", hdev->name);
4908 /* No ACL link over BR/EDR controller */
4909 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4910 return;
4912 /* No AMP link over AMP controller */
4913 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4914 return;
4916 switch (hdev->flow_ctl_mode) {
4917 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4918 hci_sched_acl_pkt(hdev);
4919 break;
4921 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4922 hci_sched_acl_blk(hdev);
4923 break;
4927 /* Schedule SCO */
4928 static void hci_sched_sco(struct hci_dev *hdev)
4930 struct hci_conn *conn;
4931 struct sk_buff *skb;
4932 int quote;
4934 BT_DBG("%s", hdev->name);
4936 if (!hci_conn_num(hdev, SCO_LINK))
4937 return;
4939 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4940 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4941 BT_DBG("skb %p len %d", skb, skb->len);
4942 hci_send_frame(hdev, skb);
4944 conn->sent++;
4945 if (conn->sent == ~0)
4946 conn->sent = 0;
4951 static void hci_sched_esco(struct hci_dev *hdev)
4953 struct hci_conn *conn;
4954 struct sk_buff *skb;
4955 int quote;
4957 BT_DBG("%s", hdev->name);
4959 if (!hci_conn_num(hdev, ESCO_LINK))
4960 return;
4962 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4963 &quote))) {
4964 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4965 BT_DBG("skb %p len %d", skb, skb->len);
4966 hci_send_frame(hdev, skb);
4968 conn->sent++;
4969 if (conn->sent == ~0)
4970 conn->sent = 0;
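/* LE scheduler: mirrors the packet-based ACL scheduler, drawing from the
 * dedicated LE buffer pool (le_cnt) when the controller advertises one and
 * falling back to the shared ACL pool otherwise.
 */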
4975 static void hci_sched_le(struct hci_dev *hdev)
4977 struct hci_chan *chan;
4978 struct sk_buff *skb;
4979 int quote, cnt, tmp;
4981 BT_DBG("%s", hdev->name);
4983 if (!hci_conn_num(hdev, LE_LINK))
4984 return;
4986 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4987 /* LE tx timeout must be longer than maximum
4988 * link supervision timeout (40.9 seconds) */
4989 if (!hdev->le_cnt && hdev->le_pkts &&
4990 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4991 hci_link_tx_to(hdev, LE_LINK);
4994 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4995 tmp = cnt;
4996 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4997 u32 priority = (skb_peek(&chan->data_q))->priority;
4998 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4999 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5000 skb->len, skb->priority);
5002 /* Stop if priority has changed */
5003 if (skb->priority < priority)
5004 break;
5006 skb = skb_dequeue(&chan->data_q);
5008 hci_send_frame(hdev, skb);
5009 hdev->le_last_tx = jiffies;
5011 cnt--;
5012 chan->sent++;
5013 chan->conn->sent++;
5017 if (hdev->le_pkts)
5018 hdev->le_cnt = cnt;
5019 else
5020 hdev->acl_cnt = cnt;
5022 if (cnt != tmp)
5023 hci_prio_recalculate(hdev, LE_LINK);
5026 static void hci_tx_work(struct work_struct *work)
5028 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5029 struct sk_buff *skb;
5031 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5032 hdev->sco_cnt, hdev->le_cnt);
5034 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5035 /* Schedule queues and send pending data to the HCI driver */
5036 hci_sched_acl(hdev);
5037 hci_sched_sco(hdev);
5038 hci_sched_esco(hdev);
5039 hci_sched_le(hdev);
5042 /* Send next queued raw (unknown type) packet */
5043 while ((skb = skb_dequeue(&hdev->raw_q)))
5044 hci_send_frame(hdev, skb);
5047 /* ----- HCI RX task (incoming data processing) ----- */
5049 /* ACL data packet */
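/* The 16-bit handle field of an incoming ACL packet carries the packet
 * boundary and broadcast flags in its upper bits; hci_flags() and
 * hci_handle() below split the two apart.
 */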
5050 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5052 struct hci_acl_hdr *hdr = (void *) skb->data;
5053 struct hci_conn *conn;
5054 __u16 handle, flags;
5056 skb_pull(skb, HCI_ACL_HDR_SIZE);
5058 handle = __le16_to_cpu(hdr->handle);
5059 flags = hci_flags(handle);
5060 handle = hci_handle(handle);
5062 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5063 handle, flags);
5065 hdev->stat.acl_rx++;
5067 hci_dev_lock(hdev);
5068 conn = hci_conn_hash_lookup_handle(hdev, handle);
5069 hci_dev_unlock(hdev);
5071 if (conn) {
5072 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5074 /* Send to upper protocol */
5075 l2cap_recv_acldata(conn, skb, flags);
5076 return;
5077 } else {
5078 BT_ERR("%s ACL packet for unknown connection handle %d",
5079 hdev->name, handle);
5082 kfree_skb(skb);
5085 /* SCO data packet */
5086 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5088 struct hci_sco_hdr *hdr = (void *) skb->data;
5089 struct hci_conn *conn;
5090 __u16 handle;
5092 skb_pull(skb, HCI_SCO_HDR_SIZE);
5094 handle = __le16_to_cpu(hdr->handle);
5096 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5098 hdev->stat.sco_rx++;
5100 hci_dev_lock(hdev);
5101 conn = hci_conn_hash_lookup_handle(hdev, handle);
5102 hci_dev_unlock(hdev);
5104 if (conn) {
5105 /* Send to upper protocol */
5106 sco_recv_scodata(conn, skb);
5107 return;
5108 } else {
5109 BT_ERR("%s SCO packet for unknown connection handle %d",
5110 hdev->name, handle);
5113 kfree_skb(skb);
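/* The first command of every request is tagged with req.start when it is
 * queued (see hci_req_add_ev()), so the current request is complete once
 * the command queue is empty or its head starts a new request.
 */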
5116 static bool hci_req_is_complete(struct hci_dev *hdev)
5118 struct sk_buff *skb;
5120 skb = skb_peek(&hdev->cmd_q);
5121 if (!skb)
5122 return true;
5124 return bt_cb(skb)->req.start;
5127 static void hci_resend_last(struct hci_dev *hdev)
5129 struct hci_command_hdr *sent;
5130 struct sk_buff *skb;
5131 u16 opcode;
5133 if (!hdev->sent_cmd)
5134 return;
5136 sent = (void *) hdev->sent_cmd->data;
5137 opcode = __le16_to_cpu(sent->opcode);
5138 if (opcode == HCI_OP_RESET)
5139 return;
5141 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5142 if (!skb)
5143 return;
5145 skb_queue_head(&hdev->cmd_q, skb);
5146 queue_work(hdev->workqueue, &hdev->cmd_work);
5149 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5151 hci_req_complete_t req_complete = NULL;
5152 struct sk_buff *skb;
5153 unsigned long flags;
5155 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5157 /* If the completed command doesn't match the last one that was
5158 * sent, we need to do special handling of it.
5160 if (!hci_sent_cmd_data(hdev, opcode)) {
5161 /* Some CSR-based controllers generate a spontaneous
5162 * reset complete event during init and any pending
5163 * command will never be completed. In such a case we
5164 * need to resend whatever was the last sent
5165 * command.
5167 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5168 hci_resend_last(hdev);
5170 return;
5173 /* If the command succeeded and there are still more commands in
5174 * this request, the request is not yet complete.
5176 if (!status && !hci_req_is_complete(hdev))
5177 return;
5179 /* If this was the last command in a request the complete
5180 * callback would be found in hdev->sent_cmd instead of the
5181 * command queue (hdev->cmd_q).
5183 if (hdev->sent_cmd) {
5184 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5186 if (req_complete) {
5187 /* We must set the complete callback to NULL to
5188 * avoid calling the callback more than once if
5189 * this function gets called again.
5191 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5193 goto call_complete;
5197 /* Remove all pending commands belonging to this request */
5198 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5199 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5200 if (bt_cb(skb)->req.start) {
5201 __skb_queue_head(&hdev->cmd_q, skb);
5202 break;
5205 req_complete = bt_cb(skb)->req.complete;
5206 kfree_skb(skb);
5208 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5210 call_complete:
5211 if (req_complete)
5212 req_complete(hdev, status);
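/* RX work: every received frame is copied to the monitor socket and, in
 * promiscuous mode, to raw sockets; frames received while in user channel
 * mode are not processed any further, data packets are dropped while
 * HCI_INIT is set, and everything else is dispatched by packet type.
 */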
5215 static void hci_rx_work(struct work_struct *work)
5217 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5218 struct sk_buff *skb;
5220 BT_DBG("%s", hdev->name);
5222 while ((skb = skb_dequeue(&hdev->rx_q))) {
5223 /* Send copy to monitor */
5224 hci_send_to_monitor(hdev, skb);
5226 if (atomic_read(&hdev->promisc)) {
5227 /* Send copy to the sockets */
5228 hci_send_to_sock(hdev, skb);
5231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5232 kfree_skb(skb);
5233 continue;
5236 if (test_bit(HCI_INIT, &hdev->flags)) {
5237 /* Don't process data packets in this state. */
5238 switch (bt_cb(skb)->pkt_type) {
5239 case HCI_ACLDATA_PKT:
5240 case HCI_SCODATA_PKT:
5241 kfree_skb(skb);
5242 continue;
5246 /* Process frame */
5247 switch (bt_cb(skb)->pkt_type) {
5248 case HCI_EVENT_PKT:
5249 BT_DBG("%s Event packet", hdev->name);
5250 hci_event_packet(hdev, skb);
5251 break;
5253 case HCI_ACLDATA_PKT:
5254 BT_DBG("%s ACL data packet", hdev->name);
5255 hci_acldata_packet(hdev, skb);
5256 break;
5258 case HCI_SCODATA_PKT:
5259 BT_DBG("%s SCO data packet", hdev->name);
5260 hci_scodata_packet(hdev, skb);
5261 break;
5263 default:
5264 kfree_skb(skb);
5265 break;
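/* Command work: when the controller still has command credits (cmd_cnt),
 * send one queued command, keep a clone in hdev->sent_cmd so replies can
 * be matched in hci_req_cmd_complete(), and (re)arm the command timeout
 * unless a reset is in flight.
 */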
5270 static void hci_cmd_work(struct work_struct *work)
5272 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5273 struct sk_buff *skb;
5275 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5276 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5278 /* Send queued commands */
5279 if (atomic_read(&hdev->cmd_cnt)) {
5280 skb = skb_dequeue(&hdev->cmd_q);
5281 if (!skb)
5282 return;
5284 kfree_skb(hdev->sent_cmd);
5286 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5287 if (hdev->sent_cmd) {
5288 atomic_dec(&hdev->cmd_cnt);
5289 hci_send_frame(hdev, skb);
5290 if (test_bit(HCI_RESET, &hdev->flags))
5291 cancel_delayed_work(&hdev->cmd_timer);
5292 else
5293 schedule_delayed_work(&hdev->cmd_timer,
5294 HCI_CMD_TIMEOUT);
5295 } else {
5296 skb_queue_head(&hdev->cmd_q, skb);
5297 queue_work(hdev->workqueue, &hdev->cmd_work);
5302 void hci_req_add_le_scan_disable(struct hci_request *req)
5304 struct hci_cp_le_set_scan_enable cp;
5306 memset(&cp, 0, sizeof(cp));
5307 cp.enable = LE_SCAN_DISABLE;
5308 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5311 void hci_req_add_le_passive_scan(struct hci_request *req)
5313 struct hci_cp_le_set_scan_param param_cp;
5314 struct hci_cp_le_set_scan_enable enable_cp;
5315 struct hci_dev *hdev = req->hdev;
5316 u8 own_addr_type;
5318 /* Set require_privacy to false since no SCAN_REQ is sent
5319 * during passive scanning. Not using an unresolvable address
5320 * here is important so that peer devices using direct
5321 * advertising with our address will be correctly reported
5322 * by the controller.
5324 if (hci_update_random_address(req, false, &own_addr_type))
5325 return;
5327 memset(&param_cp, 0, sizeof(param_cp));
5328 param_cp.type = LE_SCAN_PASSIVE;
5329 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5330 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5331 param_cp.own_address_type = own_addr_type;
5332 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5333 &param_cp);
5335 memset(&enable_cp, 0, sizeof(enable_cp));
5336 enable_cp.enable = LE_SCAN_ENABLE;
5337 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5338 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5339 &enable_cp);
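/* Usage note: both helpers above only append commands to a request; the
 * caller still has to run it, as hci_update_background_scan() below does.
 * A minimal sketch, assuming the caller already holds the needed locks:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_run(&req, NULL);
 */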
5342 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5344 if (status)
5345 BT_DBG("HCI request failed to update background scanning: "
5346 "status 0x%2.2x", status);
5349 /* This function controls the background scanning based on hdev->pend_le_conns
5350 * list. If there are pending LE connections, we start the background scanning;
5351 * otherwise we stop it.
5353 * This function requires that the caller holds hdev->lock.
5355 void hci_update_background_scan(struct hci_dev *hdev)
5357 struct hci_request req;
5358 struct hci_conn *conn;
5359 int err;
5361 if (!test_bit(HCI_UP, &hdev->flags) ||
5362 test_bit(HCI_INIT, &hdev->flags) ||
5363 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5364 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5365 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5366 return;
5368 hci_req_init(&req, hdev);
5370 if (list_empty(&hdev->pend_le_conns)) {
5371 /* If there are no pending LE connections, we should stop
5372 * the background scanning.
5375 /* If controller is not scanning we are done. */
5376 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5377 return;
5379 hci_req_add_le_scan_disable(&req);
5381 BT_DBG("%s stopping background scanning", hdev->name);
5382 } else {
5383 /* If there is at least one pending LE connection, we should
5384 * keep the background scan running.
5387 /* If controller is connecting, we should not start scanning
5388 * since some controllers are not able to scan and connect at
5389 * the same time.
5391 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5392 if (conn)
5393 return;
5395 /* If controller is currently scanning, we stop it to ensure we
5396 * don't miss any advertising (due to duplicates filter).
5398 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5399 hci_req_add_le_scan_disable(&req);
5401 hci_req_add_le_passive_scan(&req);
5403 BT_DBG("%s starting background scanning", hdev->name);
5406 err = hci_req_run(&req, update_background_scan_complete);
5407 if (err)
5408 BT_ERR("Failed to run HCI request: err %d", err);