net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
39 #include "smp.h"
41 static void hci_rx_work(struct work_struct *work);
42 static void hci_cmd_work(struct work_struct *work);
43 static void hci_tx_work(struct work_struct *work);
45 /* HCI device list */
46 LIST_HEAD(hci_dev_list);
47 DEFINE_RWLOCK(hci_dev_list_lock);
49 /* HCI callback list */
50 LIST_HEAD(hci_cb_list);
51 DEFINE_RWLOCK(hci_cb_list_lock);
53 /* HCI ID Numbering */
54 static DEFINE_IDA(hci_index_ida);
56 /* ---- HCI notifications ---- */
58 static void hci_notify(struct hci_dev *hdev, int event)
60 hci_sock_dev_event(hdev, event);
63 /* ---- HCI debugfs entries ---- */
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
72 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
98 return -EALREADY;
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
115 if (err < 0)
116 return err;
118 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
120 return count;
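/* Illustrative note (not part of the original source): dut_mode_write() is
 * driven through debugfs. Assuming debugfs is mounted at /sys/kernel/debug
 * and taking hci0 as an example device, something like
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * enables Device Under Test mode; the handler parses the input with
 * strtobool() and requires the device to be up.
 */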
123 static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
130 static int features_show(struct seq_file *f, void *ptr)
132 struct hci_dev *hdev = f->private;
133 u8 p;
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
151 hci_dev_unlock(hdev);
153 return 0;
156 static int features_open(struct inode *inode, struct file *file)
158 return single_open(file, features_show, inode->i_private);
161 static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
168 static int blacklist_show(struct seq_file *f, void *p)
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
176 hci_dev_unlock(hdev);
178 return 0;
181 static int blacklist_open(struct inode *inode, struct file *file)
183 return single_open(file, blacklist_show, inode->i_private);
186 static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
193 static int uuids_show(struct seq_file *f, void *p)
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
200 u8 i, val[16];
202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
209 seq_printf(f, "%pUb\n", val);
211 hci_dev_unlock(hdev);
213 return 0;
216 static int uuids_open(struct inode *inode, struct file *file)
218 return single_open(file, uuids_show, inode->i_private);
221 static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
228 static int inquiry_cache_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
234 hci_dev_lock(hdev);
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
247 hci_dev_unlock(hdev);
249 return 0;
252 static int inquiry_cache_open(struct inode *inode, struct file *file)
254 return single_open(file, inquiry_cache_show, inode->i_private);
257 static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
264 static int link_keys_show(struct seq_file *f, void *ptr)
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275 hci_dev_unlock(hdev);
277 return 0;
280 static int link_keys_open(struct inode *inode, struct file *file)
282 return single_open(file, link_keys_show, inode->i_private);
285 static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
292 static int dev_class_show(struct seq_file *f, void *ptr)
294 struct hci_dev *hdev = f->private;
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
301 return 0;
304 static int dev_class_open(struct inode *inode, struct file *file)
306 return single_open(file, dev_class_show, inode->i_private);
309 static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
316 static int voice_setting_get(void *data, u64 *val)
318 struct hci_dev *hdev = data;
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
324 return 0;
327 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
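/* DEFINE_SIMPLE_ATTRIBUTE() generates the debugfs file_operations from the
 * get/set pair and printf format given here; passing NULL for the setter, as
 * above, makes voice_setting read-only. The same pattern is used for the
 * remaining simple integer attributes in this file.
 */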
330 static int auto_accept_delay_set(void *data, u64 val)
332 struct hci_dev *hdev = data;
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
338 return 0;
341 static int auto_accept_delay_get(void *data, u64 *val)
343 struct hci_dev *hdev = data;
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
349 return 0;
352 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
355 static int ssp_debug_mode_set(void *data, u64 val)
357 struct hci_dev *hdev = data;
358 struct sk_buff *skb;
359 __u8 mode;
360 int err;
362 if (val != 0 && val != 1)
363 return -EINVAL;
365 if (!test_bit(HCI_UP, &hdev->flags))
366 return -ENETDOWN;
368 hci_req_lock(hdev);
369 mode = val;
370 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
371 &mode, HCI_CMD_TIMEOUT);
372 hci_req_unlock(hdev);
374 if (IS_ERR(skb))
375 return PTR_ERR(skb);
377 err = -bt_to_errno(skb->data[0]);
378 kfree_skb(skb);
380 if (err < 0)
381 return err;
383 hci_dev_lock(hdev);
384 hdev->ssp_debug_mode = val;
385 hci_dev_unlock(hdev);
387 return 0;
390 static int ssp_debug_mode_get(void *data, u64 *val)
392 struct hci_dev *hdev = data;
394 hci_dev_lock(hdev);
395 *val = hdev->ssp_debug_mode;
396 hci_dev_unlock(hdev);
398 return 0;
401 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
402 ssp_debug_mode_set, "%llu\n");
404 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
405 size_t count, loff_t *ppos)
407 struct hci_dev *hdev = file->private_data;
408 char buf[3];
410 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
411 buf[1] = '\n';
412 buf[2] = '\0';
413 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
416 static ssize_t force_sc_support_write(struct file *file,
417 const char __user *user_buf,
418 size_t count, loff_t *ppos)
420 struct hci_dev *hdev = file->private_data;
421 char buf[32];
422 size_t buf_size = min(count, (sizeof(buf)-1));
423 bool enable;
425 if (test_bit(HCI_UP, &hdev->flags))
426 return -EBUSY;
428 if (copy_from_user(buf, user_buf, buf_size))
429 return -EFAULT;
431 buf[buf_size] = '\0';
432 if (strtobool(buf, &enable))
433 return -EINVAL;
435 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
436 return -EALREADY;
438 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
440 return count;
443 static const struct file_operations force_sc_support_fops = {
444 .open = simple_open,
445 .read = force_sc_support_read,
446 .write = force_sc_support_write,
447 .llseek = default_llseek,
450 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
451 size_t count, loff_t *ppos)
453 struct hci_dev *hdev = file->private_data;
454 char buf[3];
456 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
457 buf[1] = '\n';
458 buf[2] = '\0';
459 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
462 static const struct file_operations sc_only_mode_fops = {
463 .open = simple_open,
464 .read = sc_only_mode_read,
465 .llseek = default_llseek,
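/* The 500..3600000 range enforced below reads naturally as milliseconds
 * (0 disables the idle timeout; otherwise half a second up to one hour),
 * though the unit is not spelled out here.
 */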
468 static int idle_timeout_set(void *data, u64 val)
470 struct hci_dev *hdev = data;
472 if (val != 0 && (val < 500 || val > 3600000))
473 return -EINVAL;
475 hci_dev_lock(hdev);
476 hdev->idle_timeout = val;
477 hci_dev_unlock(hdev);
479 return 0;
482 static int idle_timeout_get(void *data, u64 *val)
484 struct hci_dev *hdev = data;
486 hci_dev_lock(hdev);
487 *val = hdev->idle_timeout;
488 hci_dev_unlock(hdev);
490 return 0;
493 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
494 idle_timeout_set, "%llu\n");
496 static int rpa_timeout_set(void *data, u64 val)
498 struct hci_dev *hdev = data;
500 /* Require the RPA timeout to be at least 30 seconds and at most
501 * 24 hours.
503 if (val < 30 || val > (60 * 60 * 24))
504 return -EINVAL;
506 hci_dev_lock(hdev);
507 hdev->rpa_timeout = val;
508 hci_dev_unlock(hdev);
510 return 0;
513 static int rpa_timeout_get(void *data, u64 *val)
515 struct hci_dev *hdev = data;
517 hci_dev_lock(hdev);
518 *val = hdev->rpa_timeout;
519 hci_dev_unlock(hdev);
521 return 0;
524 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
525 rpa_timeout_set, "%llu\n");
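/* The sniff interval limits are given in baseband slots of 0.625 ms per the
 * HCI Sniff Mode command, which is why the setters below insist on even,
 * non-zero values and keep sniff_min_interval <= sniff_max_interval.
 */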
527 static int sniff_min_interval_set(void *data, u64 val)
529 struct hci_dev *hdev = data;
531 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
532 return -EINVAL;
534 hci_dev_lock(hdev);
535 hdev->sniff_min_interval = val;
536 hci_dev_unlock(hdev);
538 return 0;
541 static int sniff_min_interval_get(void *data, u64 *val)
543 struct hci_dev *hdev = data;
545 hci_dev_lock(hdev);
546 *val = hdev->sniff_min_interval;
547 hci_dev_unlock(hdev);
549 return 0;
552 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
553 sniff_min_interval_set, "%llu\n");
555 static int sniff_max_interval_set(void *data, u64 val)
557 struct hci_dev *hdev = data;
559 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
560 return -EINVAL;
562 hci_dev_lock(hdev);
563 hdev->sniff_max_interval = val;
564 hci_dev_unlock(hdev);
566 return 0;
569 static int sniff_max_interval_get(void *data, u64 *val)
571 struct hci_dev *hdev = data;
573 hci_dev_lock(hdev);
574 *val = hdev->sniff_max_interval;
575 hci_dev_unlock(hdev);
577 return 0;
580 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
581 sniff_max_interval_set, "%llu\n");
583 static int conn_info_min_age_set(void *data, u64 val)
585 struct hci_dev *hdev = data;
587 if (val == 0 || val > hdev->conn_info_max_age)
588 return -EINVAL;
590 hci_dev_lock(hdev);
591 hdev->conn_info_min_age = val;
592 hci_dev_unlock(hdev);
594 return 0;
597 static int conn_info_min_age_get(void *data, u64 *val)
599 struct hci_dev *hdev = data;
601 hci_dev_lock(hdev);
602 *val = hdev->conn_info_min_age;
603 hci_dev_unlock(hdev);
605 return 0;
608 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
609 conn_info_min_age_set, "%llu\n");
611 static int conn_info_max_age_set(void *data, u64 val)
613 struct hci_dev *hdev = data;
615 if (val == 0 || val < hdev->conn_info_min_age)
616 return -EINVAL;
618 hci_dev_lock(hdev);
619 hdev->conn_info_max_age = val;
620 hci_dev_unlock(hdev);
622 return 0;
625 static int conn_info_max_age_get(void *data, u64 *val)
627 struct hci_dev *hdev = data;
629 hci_dev_lock(hdev);
630 *val = hdev->conn_info_max_age;
631 hci_dev_unlock(hdev);
633 return 0;
636 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
637 conn_info_max_age_set, "%llu\n");
639 static int identity_show(struct seq_file *f, void *p)
641 struct hci_dev *hdev = f->private;
642 bdaddr_t addr;
643 u8 addr_type;
645 hci_dev_lock(hdev);
647 hci_copy_identity_address(hdev, &addr, &addr_type);
649 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
650 16, hdev->irk, &hdev->rpa);
652 hci_dev_unlock(hdev);
654 return 0;
657 static int identity_open(struct inode *inode, struct file *file)
659 return single_open(file, identity_show, inode->i_private);
662 static const struct file_operations identity_fops = {
663 .open = identity_open,
664 .read = seq_read,
665 .llseek = seq_lseek,
666 .release = single_release,
669 static int random_address_show(struct seq_file *f, void *p)
671 struct hci_dev *hdev = f->private;
673 hci_dev_lock(hdev);
674 seq_printf(f, "%pMR\n", &hdev->random_addr);
675 hci_dev_unlock(hdev);
677 return 0;
680 static int random_address_open(struct inode *inode, struct file *file)
682 return single_open(file, random_address_show, inode->i_private);
685 static const struct file_operations random_address_fops = {
686 .open = random_address_open,
687 .read = seq_read,
688 .llseek = seq_lseek,
689 .release = single_release,
692 static int static_address_show(struct seq_file *f, void *p)
694 struct hci_dev *hdev = f->private;
696 hci_dev_lock(hdev);
697 seq_printf(f, "%pMR\n", &hdev->static_addr);
698 hci_dev_unlock(hdev);
700 return 0;
703 static int static_address_open(struct inode *inode, struct file *file)
705 return single_open(file, static_address_show, inode->i_private);
708 static const struct file_operations static_address_fops = {
709 .open = static_address_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
715 static ssize_t force_static_address_read(struct file *file,
716 char __user *user_buf,
717 size_t count, loff_t *ppos)
719 struct hci_dev *hdev = file->private_data;
720 char buf[3];
722 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
723 buf[1] = '\n';
724 buf[2] = '\0';
725 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
728 static ssize_t force_static_address_write(struct file *file,
729 const char __user *user_buf,
730 size_t count, loff_t *ppos)
732 struct hci_dev *hdev = file->private_data;
733 char buf[32];
734 size_t buf_size = min(count, (sizeof(buf)-1));
735 bool enable;
737 if (test_bit(HCI_UP, &hdev->flags))
738 return -EBUSY;
740 if (copy_from_user(buf, user_buf, buf_size))
741 return -EFAULT;
743 buf[buf_size] = '\0';
744 if (strtobool(buf, &enable))
745 return -EINVAL;
747 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
748 return -EALREADY;
750 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
752 return count;
755 static const struct file_operations force_static_address_fops = {
756 .open = simple_open,
757 .read = force_static_address_read,
758 .write = force_static_address_write,
759 .llseek = default_llseek,
762 static int white_list_show(struct seq_file *f, void *ptr)
764 struct hci_dev *hdev = f->private;
765 struct bdaddr_list *b;
767 hci_dev_lock(hdev);
768 list_for_each_entry(b, &hdev->le_white_list, list)
769 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
770 hci_dev_unlock(hdev);
772 return 0;
775 static int white_list_open(struct inode *inode, struct file *file)
777 return single_open(file, white_list_show, inode->i_private);
780 static const struct file_operations white_list_fops = {
781 .open = white_list_open,
782 .read = seq_read,
783 .llseek = seq_lseek,
784 .release = single_release,
787 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
789 struct hci_dev *hdev = f->private;
790 struct list_head *p, *n;
792 hci_dev_lock(hdev);
793 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
794 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
795 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
796 &irk->bdaddr, irk->addr_type,
797 16, irk->val, &irk->rpa);
799 hci_dev_unlock(hdev);
801 return 0;
804 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
806 return single_open(file, identity_resolving_keys_show,
807 inode->i_private);
810 static const struct file_operations identity_resolving_keys_fops = {
811 .open = identity_resolving_keys_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
817 static int long_term_keys_show(struct seq_file *f, void *ptr)
819 struct hci_dev *hdev = f->private;
820 struct list_head *p, *n;
822 hci_dev_lock(hdev);
823 list_for_each_safe(p, n, &hdev->long_term_keys) {
824 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
828 __le64_to_cpu(ltk->rand), 16, ltk->val);
830 hci_dev_unlock(hdev);
832 return 0;
835 static int long_term_keys_open(struct inode *inode, struct file *file)
837 return single_open(file, long_term_keys_show, inode->i_private);
840 static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
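/* LE connection interval values are in units of 1.25 ms, so the 0x0006..0x0c80
 * window enforced below corresponds to 7.5 ms up to 4 seconds, matching the
 * range allowed by the LE connection parameter commands.
 */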
847 static int conn_min_interval_set(void *data, u64 val)
849 struct hci_dev *hdev = data;
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
854 hci_dev_lock(hdev);
855 hdev->le_conn_min_interval = val;
856 hci_dev_unlock(hdev);
858 return 0;
861 static int conn_min_interval_get(void *data, u64 *val)
863 struct hci_dev *hdev = data;
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
869 return 0;
872 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
875 static int conn_max_interval_set(void *data, u64 val)
877 struct hci_dev *hdev = data;
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
882 hci_dev_lock(hdev);
883 hdev->le_conn_max_interval = val;
884 hci_dev_unlock(hdev);
886 return 0;
889 static int conn_max_interval_get(void *data, u64 *val)
891 struct hci_dev *hdev = data;
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
897 return 0;
900 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
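/* The advertising channel map is a three-bit mask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39, so only values 0x01..0x07 make sense
 * and anything else is rejected below.
 */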
903 static int adv_channel_map_set(void *data, u64 val)
905 struct hci_dev *hdev = data;
907 if (val < 0x01 || val > 0x07)
908 return -EINVAL;
910 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val;
912 hci_dev_unlock(hdev);
914 return 0;
917 static int adv_channel_map_get(void *data, u64 *val)
919 struct hci_dev *hdev = data;
921 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map;
923 hci_dev_unlock(hdev);
925 return 0;
928 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
929 adv_channel_map_set, "%llu\n");
931 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
932 size_t count, loff_t *ppos)
934 struct hci_dev *hdev = file->private_data;
935 char buf[3];
937 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
938 buf[1] = '\n';
939 buf[2] = '\0';
940 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
943 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
944 size_t count, loff_t *position)
946 struct hci_dev *hdev = fp->private_data;
947 bool enable;
948 char buf[32];
949 size_t buf_size = min(count, (sizeof(buf)-1));
951 if (copy_from_user(buf, user_buffer, buf_size))
952 return -EFAULT;
954 buf[buf_size] = '\0';
956 if (strtobool(buf, &enable) < 0)
957 return -EINVAL;
959 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
960 return -EALREADY;
962 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
964 return count;
967 static const struct file_operations lowpan_debugfs_fops = {
968 .open = simple_open,
969 .read = lowpan_read,
970 .write = lowpan_write,
971 .llseek = default_llseek,
974 static int le_auto_conn_show(struct seq_file *sf, void *ptr)
976 struct hci_dev *hdev = sf->private;
977 struct hci_conn_params *p;
979 hci_dev_lock(hdev);
981 list_for_each_entry(p, &hdev->le_conn_params, list) {
982 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
983 p->auto_connect);
986 hci_dev_unlock(hdev);
988 return 0;
991 static int le_auto_conn_open(struct inode *inode, struct file *file)
993 return single_open(file, le_auto_conn_show, inode->i_private);
996 static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
997 size_t count, loff_t *offset)
999 struct seq_file *sf = file->private_data;
1000 struct hci_dev *hdev = sf->private;
1001 u8 auto_connect = 0;
1002 bdaddr_t addr;
1003 u8 addr_type;
1004 char *buf;
1005 int err = 0;
1006 int n;
1008 /* Don't allow partial write */
1009 if (*offset != 0)
1010 return -EINVAL;
1012 if (count < 3)
1013 return -EINVAL;
1015 buf = memdup_user(data, count);
1016 if (IS_ERR(buf))
1017 return PTR_ERR(buf);
1019 if (memcmp(buf, "add", 3) == 0) {
1020 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
1021 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1022 &addr.b[1], &addr.b[0], &addr_type,
1023 &auto_connect);
1025 if (n < 7) {
1026 err = -EINVAL;
1027 goto done;
1030 hci_dev_lock(hdev);
1031 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
1032 hdev->le_conn_min_interval,
1033 hdev->le_conn_max_interval);
1034 hci_dev_unlock(hdev);
1036 if (err)
1037 goto done;
1038 } else if (memcmp(buf, "del", 3) == 0) {
1039 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1040 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1041 &addr.b[1], &addr.b[0], &addr_type);
1043 if (n < 7) {
1044 err = -EINVAL;
1045 goto done;
1048 hci_dev_lock(hdev);
1049 hci_conn_params_del(hdev, &addr, addr_type);
1050 hci_dev_unlock(hdev);
1051 } else if (memcmp(buf, "clr", 3) == 0) {
1052 hci_dev_lock(hdev);
1053 hci_conn_params_clear(hdev);
1054 hci_pend_le_conns_clear(hdev);
1055 hci_update_background_scan(hdev);
1056 hci_dev_unlock(hdev);
1057 } else {
1058 err = -EINVAL;
1061 done:
1062 kfree(buf);
1064 if (err)
1065 return err;
1066 else
1067 return count;
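/* Illustrative usage (addresses and values here are only examples): the parser
 * above accepts three commands written to the le_auto_conn debugfs file:
 *
 *	add <bdaddr> <addr_type> [auto_connect]   e.g. "add 00:11:22:33:44:55 0 1"
 *	del <bdaddr> <addr_type>                  e.g. "del 00:11:22:33:44:55 0"
 *	clr
 *
 * with the address written most-significant byte first, as parsed by the
 * sscanf() calls above.
 */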
1070 static const struct file_operations le_auto_conn_fops = {
1071 .open = le_auto_conn_open,
1072 .read = seq_read,
1073 .write = le_auto_conn_write,
1074 .llseek = seq_lseek,
1075 .release = single_release,
1078 /* ---- HCI requests ---- */
1080 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1082 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1084 if (hdev->req_status == HCI_REQ_PEND) {
1085 hdev->req_result = result;
1086 hdev->req_status = HCI_REQ_DONE;
1087 wake_up_interruptible(&hdev->req_wait_q);
1091 static void hci_req_cancel(struct hci_dev *hdev, int err)
1093 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1095 if (hdev->req_status == HCI_REQ_PEND) {
1096 hdev->req_result = err;
1097 hdev->req_status = HCI_REQ_CANCELED;
1098 wake_up_interruptible(&hdev->req_wait_q);
1102 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1103 u8 event)
1105 struct hci_ev_cmd_complete *ev;
1106 struct hci_event_hdr *hdr;
1107 struct sk_buff *skb;
1109 hci_dev_lock(hdev);
1111 skb = hdev->recv_evt;
1112 hdev->recv_evt = NULL;
1114 hci_dev_unlock(hdev);
1116 if (!skb)
1117 return ERR_PTR(-ENODATA);
1119 if (skb->len < sizeof(*hdr)) {
1120 BT_ERR("Too short HCI event");
1121 goto failed;
1124 hdr = (void *) skb->data;
1125 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1127 if (event) {
1128 if (hdr->evt != event)
1129 goto failed;
1130 return skb;
1133 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1134 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1135 goto failed;
1138 if (skb->len < sizeof(*ev)) {
1139 BT_ERR("Too short cmd_complete event");
1140 goto failed;
1143 ev = (void *) skb->data;
1144 skb_pull(skb, sizeof(*ev));
1146 if (opcode == __le16_to_cpu(ev->opcode))
1147 return skb;
1149 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1150 __le16_to_cpu(ev->opcode));
1152 failed:
1153 kfree_skb(skb);
1154 return ERR_PTR(-ENODATA);
1157 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1158 const void *param, u8 event, u32 timeout)
1160 DECLARE_WAITQUEUE(wait, current);
1161 struct hci_request req;
1162 int err = 0;
1164 BT_DBG("%s", hdev->name);
1166 hci_req_init(&req, hdev);
1168 hci_req_add_ev(&req, opcode, plen, param, event);
1170 hdev->req_status = HCI_REQ_PEND;
1172 err = hci_req_run(&req, hci_req_sync_complete);
1173 if (err < 0)
1174 return ERR_PTR(err);
1176 add_wait_queue(&hdev->req_wait_q, &wait);
1177 set_current_state(TASK_INTERRUPTIBLE);
1179 schedule_timeout(timeout);
1181 remove_wait_queue(&hdev->req_wait_q, &wait);
1183 if (signal_pending(current))
1184 return ERR_PTR(-EINTR);
1186 switch (hdev->req_status) {
1187 case HCI_REQ_DONE:
1188 err = -bt_to_errno(hdev->req_result);
1189 break;
1191 case HCI_REQ_CANCELED:
1192 err = -hdev->req_result;
1193 break;
1195 default:
1196 err = -ETIMEDOUT;
1197 break;
1200 hdev->req_status = hdev->req_result = 0;
1202 BT_DBG("%s end: err %d", hdev->name, err);
1204 if (err < 0)
1205 return ERR_PTR(err);
1207 return hci_get_cmd_complete(hdev, opcode, event);
1209 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1211 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1212 const void *param, u32 timeout)
1214 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1216 EXPORT_SYMBOL(__hci_cmd_sync);
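/* A minimal usage sketch, mirroring what the debugfs handlers earlier in this
 * file do: take hci_req_lock() around the call, issue a single command
 * synchronously and inspect the returned event, e.g.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 *
 * For these commands the returned skb carries the command complete parameters
 * with the status byte first.
 */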
1218 /* Execute request and wait for completion. */
1219 static int __hci_req_sync(struct hci_dev *hdev,
1220 void (*func)(struct hci_request *req,
1221 unsigned long opt),
1222 unsigned long opt, __u32 timeout)
1224 struct hci_request req;
1225 DECLARE_WAITQUEUE(wait, current);
1226 int err = 0;
1228 BT_DBG("%s start", hdev->name);
1230 hci_req_init(&req, hdev);
1232 hdev->req_status = HCI_REQ_PEND;
1234 func(&req, opt);
1236 err = hci_req_run(&req, hci_req_sync_complete);
1237 if (err < 0) {
1238 hdev->req_status = 0;
1240 /* ENODATA means the HCI request command queue is empty.
1241 * This can happen when a request with conditionals doesn't
1242 * trigger any commands to be sent. This is normal behavior
1243 * and should not trigger an error return.
1245 if (err == -ENODATA)
1246 return 0;
1248 return err;
1251 add_wait_queue(&hdev->req_wait_q, &wait);
1252 set_current_state(TASK_INTERRUPTIBLE);
1254 schedule_timeout(timeout);
1256 remove_wait_queue(&hdev->req_wait_q, &wait);
1258 if (signal_pending(current))
1259 return -EINTR;
1261 switch (hdev->req_status) {
1262 case HCI_REQ_DONE:
1263 err = -bt_to_errno(hdev->req_result);
1264 break;
1266 case HCI_REQ_CANCELED:
1267 err = -hdev->req_result;
1268 break;
1270 default:
1271 err = -ETIMEDOUT;
1272 break;
1275 hdev->req_status = hdev->req_result = 0;
1277 BT_DBG("%s end: err %d", hdev->name, err);
1279 return err;
1282 static int hci_req_sync(struct hci_dev *hdev,
1283 void (*req)(struct hci_request *req,
1284 unsigned long opt),
1285 unsigned long opt, __u32 timeout)
1287 int ret;
1289 if (!test_bit(HCI_UP, &hdev->flags))
1290 return -ENETDOWN;
1292 /* Serialize all requests */
1293 hci_req_lock(hdev);
1294 ret = __hci_req_sync(hdev, req, opt, timeout);
1295 hci_req_unlock(hdev);
1297 return ret;
1300 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1302 BT_DBG("%s %ld", req->hdev->name, opt);
1304 /* Reset device */
1305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1309 static void bredr_init(struct hci_request *req)
1311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1313 /* Read Local Supported Features */
1314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1316 /* Read Local Version */
1317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1319 /* Read BD Address */
1320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1323 static void amp_init(struct hci_request *req)
1325 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1327 /* Read Local Version */
1328 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1330 /* Read Local Supported Commands */
1331 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1333 /* Read Local Supported Features */
1334 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1336 /* Read Local AMP Info */
1337 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1339 /* Read Data Blk size */
1340 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1342 /* Read Flow Control Mode */
1343 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1345 /* Read Location Data */
1346 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1349 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1351 struct hci_dev *hdev = req->hdev;
1353 BT_DBG("%s %ld", hdev->name, opt);
1355 /* Reset */
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1357 hci_reset_req(req, 0);
1359 switch (hdev->dev_type) {
1360 case HCI_BREDR:
1361 bredr_init(req);
1362 break;
1364 case HCI_AMP:
1365 amp_init(req);
1366 break;
1368 default:
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1370 break;
1374 static void bredr_setup(struct hci_request *req)
1376 struct hci_dev *hdev = req->hdev;
1378 __le16 param;
1379 __u8 flt_type;
1381 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1382 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1384 /* Read Class of Device */
1385 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1387 /* Read Local Name */
1388 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1390 /* Read Voice Setting */
1391 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1393 /* Read Number of Supported IAC */
1394 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1396 /* Read Current IAC LAP */
1397 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1399 /* Clear Event Filters */
1400 flt_type = HCI_FLT_CLEAR_ALL;
1401 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1403 /* Connection accept timeout ~20 secs */
1404 param = cpu_to_le16(0x7d00);
1405 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1407 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1408 * but it does not support page scan related HCI commands.
1410 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1411 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1412 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1416 static void le_setup(struct hci_request *req)
1418 struct hci_dev *hdev = req->hdev;
1420 /* Read LE Buffer Size */
1421 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1423 /* Read LE Local Supported Features */
1424 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1426 /* Read LE Supported States */
1427 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1429 /* Read LE Advertising Channel TX Power */
1430 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1432 /* Read LE White List Size */
1433 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1435 /* Clear LE White List */
1436 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1438 /* LE-only controllers have LE implicitly enabled */
1439 if (!lmp_bredr_capable(hdev))
1440 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
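/* The inquiry mode values returned below follow the Write Inquiry Mode
 * command: 0x00 standard results, 0x01 results with RSSI, 0x02 results with
 * RSSI or extended inquiry results. The explicit manufacturer/revision checks
 * cover controllers that handle RSSI results even though they do not
 * advertise the corresponding LMP feature bit.
 */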
1443 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1445 if (lmp_ext_inq_capable(hdev))
1446 return 0x02;
1448 if (lmp_inq_rssi_capable(hdev))
1449 return 0x01;
1451 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1452 hdev->lmp_subver == 0x0757)
1453 return 0x01;
1455 if (hdev->manufacturer == 15) {
1456 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1457 return 0x01;
1458 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1459 return 0x01;
1460 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1461 return 0x01;
1464 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1465 hdev->lmp_subver == 0x1805)
1466 return 0x01;
1468 return 0x00;
1471 static void hci_setup_inquiry_mode(struct hci_request *req)
1473 u8 mode;
1475 mode = hci_get_inquiry_mode(req->hdev);
1477 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1480 static void hci_setup_event_mask(struct hci_request *req)
1482 struct hci_dev *hdev = req->hdev;
1484 /* The second byte is 0xff instead of 0x9f (two reserved bits
1485 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1486 * command otherwise.
1488 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
 1490 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
 1491 * any event mask for pre-1.2 devices.
1493 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1494 return;
1496 if (lmp_bredr_capable(hdev)) {
1497 events[4] |= 0x01; /* Flow Specification Complete */
1498 events[4] |= 0x02; /* Inquiry Result with RSSI */
1499 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1500 events[5] |= 0x08; /* Synchronous Connection Complete */
1501 events[5] |= 0x10; /* Synchronous Connection Changed */
1502 } else {
1503 /* Use a different default for LE-only devices */
1504 memset(events, 0, sizeof(events));
1505 events[0] |= 0x10; /* Disconnection Complete */
1506 events[0] |= 0x80; /* Encryption Change */
1507 events[1] |= 0x08; /* Read Remote Version Information Complete */
1508 events[1] |= 0x20; /* Command Complete */
1509 events[1] |= 0x40; /* Command Status */
1510 events[1] |= 0x80; /* Hardware Error */
1511 events[2] |= 0x04; /* Number of Completed Packets */
1512 events[3] |= 0x02; /* Data Buffer Overflow */
1513 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1516 if (lmp_inq_rssi_capable(hdev))
1517 events[4] |= 0x02; /* Inquiry Result with RSSI */
1519 if (lmp_sniffsubr_capable(hdev))
1520 events[5] |= 0x20; /* Sniff Subrating */
1522 if (lmp_pause_enc_capable(hdev))
1523 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1525 if (lmp_ext_inq_capable(hdev))
1526 events[5] |= 0x40; /* Extended Inquiry Result */
1528 if (lmp_no_flush_capable(hdev))
1529 events[7] |= 0x01; /* Enhanced Flush Complete */
1531 if (lmp_lsto_capable(hdev))
1532 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1534 if (lmp_ssp_capable(hdev)) {
1535 events[6] |= 0x01; /* IO Capability Request */
1536 events[6] |= 0x02; /* IO Capability Response */
1537 events[6] |= 0x04; /* User Confirmation Request */
1538 events[6] |= 0x08; /* User Passkey Request */
1539 events[6] |= 0x10; /* Remote OOB Data Request */
1540 events[6] |= 0x20; /* Simple Pairing Complete */
1541 events[7] |= 0x04; /* User Passkey Notification */
1542 events[7] |= 0x08; /* Keypress Notification */
1543 events[7] |= 0x10; /* Remote Host Supported
1544 * Features Notification
1548 if (lmp_le_capable(hdev))
1549 events[7] |= 0x20; /* LE Meta-Event */
1551 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1553 if (lmp_le_capable(hdev)) {
1554 memset(events, 0, sizeof(events));
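/* 0x1f enables the first five LE meta events: LE Connection Complete,
 * LE Advertising Report, LE Connection Update Complete, LE Read Remote
 * Used Features Complete and LE Long Term Key Request.
 */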
1555 events[0] = 0x1f;
1556 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1557 sizeof(events), events);
1561 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1563 struct hci_dev *hdev = req->hdev;
1565 if (lmp_bredr_capable(hdev))
1566 bredr_setup(req);
1567 else
1568 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1570 if (lmp_le_capable(hdev))
1571 le_setup(req);
1573 hci_setup_event_mask(req);
1575 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1576 * local supported commands HCI command.
1578 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1579 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1581 if (lmp_ssp_capable(hdev)) {
1582 /* When SSP is available, then the host features page
1583 * should also be available as well. However some
1584 * controllers list the max_page as 0 as long as SSP
1585 * has not been enabled. To achieve proper debugging
1586 * output, force the minimum max_page to 1 at least.
1588 hdev->max_page = 0x01;
1590 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1591 u8 mode = 0x01;
1592 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1593 sizeof(mode), &mode);
1594 } else {
1595 struct hci_cp_write_eir cp;
1597 memset(hdev->eir, 0, sizeof(hdev->eir));
1598 memset(&cp, 0, sizeof(cp));
1600 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1604 if (lmp_inq_rssi_capable(hdev))
1605 hci_setup_inquiry_mode(req);
1607 if (lmp_inq_tx_pwr_capable(hdev))
1608 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1610 if (lmp_ext_feat_capable(hdev)) {
1611 struct hci_cp_read_local_ext_features cp;
1613 cp.page = 0x01;
1614 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1615 sizeof(cp), &cp);
1618 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1619 u8 enable = 1;
1620 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1621 &enable);
1625 static void hci_setup_link_policy(struct hci_request *req)
1627 struct hci_dev *hdev = req->hdev;
1628 struct hci_cp_write_def_link_policy cp;
1629 u16 link_policy = 0;
1631 if (lmp_rswitch_capable(hdev))
1632 link_policy |= HCI_LP_RSWITCH;
1633 if (lmp_hold_capable(hdev))
1634 link_policy |= HCI_LP_HOLD;
1635 if (lmp_sniff_capable(hdev))
1636 link_policy |= HCI_LP_SNIFF;
1637 if (lmp_park_capable(hdev))
1638 link_policy |= HCI_LP_PARK;
1640 cp.policy = cpu_to_le16(link_policy);
1641 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1644 static void hci_set_le_support(struct hci_request *req)
1646 struct hci_dev *hdev = req->hdev;
1647 struct hci_cp_write_le_host_supported cp;
1649 /* LE-only devices do not support explicit enablement */
1650 if (!lmp_bredr_capable(hdev))
1651 return;
1653 memset(&cp, 0, sizeof(cp));
1655 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1656 cp.le = 0x01;
1657 cp.simul = lmp_le_br_capable(hdev);
1660 if (cp.le != lmp_host_le_capable(hdev))
1661 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1662 &cp);
1665 static void hci_set_event_mask_page_2(struct hci_request *req)
1667 struct hci_dev *hdev = req->hdev;
1668 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1670 /* If Connectionless Slave Broadcast master role is supported
1671 * enable all necessary events for it.
1673 if (lmp_csb_master_capable(hdev)) {
1674 events[1] |= 0x40; /* Triggered Clock Capture */
1675 events[1] |= 0x80; /* Synchronization Train Complete */
1676 events[2] |= 0x10; /* Slave Page Response Timeout */
1677 events[2] |= 0x20; /* CSB Channel Map Change */
1680 /* If Connectionless Slave Broadcast slave role is supported
1681 * enable all necessary events for it.
1683 if (lmp_csb_slave_capable(hdev)) {
1684 events[2] |= 0x01; /* Synchronization Train Received */
1685 events[2] |= 0x02; /* CSB Receive */
1686 events[2] |= 0x04; /* CSB Timeout */
1687 events[2] |= 0x08; /* Truncated Page Complete */
1690 /* Enable Authenticated Payload Timeout Expired event if supported */
1691 if (lmp_ping_capable(hdev))
1692 events[2] |= 0x80;
1694 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1697 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1699 struct hci_dev *hdev = req->hdev;
1700 u8 p;
1702 /* Some Broadcom based Bluetooth controllers do not support the
1703 * Delete Stored Link Key command. They are clearly indicating its
1704 * absence in the bit mask of supported commands.
 1706 * Check the supported commands and send the command only if it is
 1707 * marked as supported. If not supported, assume that the controller
 1708 * does not have actual support for stored link keys, which makes this
 1709 * command redundant anyway.
1711 * Some controllers indicate that they support handling deleting
1712 * stored link keys, but they don't. The quirk lets a driver
1713 * just disable this command.
1715 if (hdev->commands[6] & 0x80 &&
1716 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1717 struct hci_cp_delete_stored_link_key cp;
1719 bacpy(&cp.bdaddr, BDADDR_ANY);
1720 cp.delete_all = 0x01;
1721 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1722 sizeof(cp), &cp);
1725 if (hdev->commands[5] & 0x10)
1726 hci_setup_link_policy(req);
1728 if (lmp_le_capable(hdev))
1729 hci_set_le_support(req);
1731 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733 struct hci_cp_read_local_ext_features cp;
1735 cp.page = p;
1736 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737 sizeof(cp), &cp);
1741 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1743 struct hci_dev *hdev = req->hdev;
1745 /* Set event mask page 2 if the HCI command for it is supported */
1746 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req);
1749 /* Check for Synchronization Train support */
1750 if (lmp_sync_train_capable(hdev))
1751 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1753 /* Enable Secure Connections if supported and configured */
1754 if ((lmp_sc_capable(hdev) ||
1755 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1756 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757 u8 support = 0x01;
1758 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1759 sizeof(support), &support);
1763 static int __hci_init(struct hci_dev *hdev)
1765 int err;
1767 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1768 if (err < 0)
1769 return err;
1771 /* The Device Under Test (DUT) mode is special and available for
1772 * all controller types. So just create it early on.
1774 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1775 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1776 &dut_mode_fops);
1779 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1780 * BR/EDR/LE type controllers. AMP controllers only need the
1781 * first stage init.
1783 if (hdev->dev_type != HCI_BREDR)
1784 return 0;
1786 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1787 if (err < 0)
1788 return err;
1790 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1794 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1798 /* Only create debugfs entries during the initial setup
1799 * phase and not every time the controller gets powered on.
1801 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1802 return 0;
1804 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1805 &features_fops);
1806 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1807 &hdev->manufacturer);
1808 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1809 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1810 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811 &blacklist_fops);
1812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815 &conn_info_min_age_fops);
1816 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817 &conn_info_max_age_fops);
1819 if (lmp_bredr_capable(hdev)) {
1820 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1821 hdev, &inquiry_cache_fops);
1822 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1823 hdev, &link_keys_fops);
1824 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1825 hdev, &dev_class_fops);
1826 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1827 hdev, &voice_setting_fops);
1830 if (lmp_ssp_capable(hdev)) {
1831 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832 hdev, &auto_accept_delay_fops);
1833 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834 hdev, &ssp_debug_mode_fops);
1835 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836 hdev, &force_sc_support_fops);
1837 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1838 hdev, &sc_only_mode_fops);
1841 if (lmp_sniff_capable(hdev)) {
1842 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1843 hdev, &idle_timeout_fops);
1844 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1845 hdev, &sniff_min_interval_fops);
1846 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1847 hdev, &sniff_max_interval_fops);
1850 if (lmp_le_capable(hdev)) {
1851 debugfs_create_file("identity", 0400, hdev->debugfs,
1852 hdev, &identity_fops);
1853 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1854 hdev, &rpa_timeout_fops);
1855 debugfs_create_file("random_address", 0444, hdev->debugfs,
1856 hdev, &random_address_fops);
1857 debugfs_create_file("static_address", 0444, hdev->debugfs,
1858 hdev, &static_address_fops);
1860 /* For controllers with a public address, provide a debug
1861 * option to force the usage of the configured static
1862 * address. By default the public address is used.
1864 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1865 debugfs_create_file("force_static_address", 0644,
1866 hdev->debugfs, hdev,
1867 &force_static_address_fops);
1869 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1870 &hdev->le_white_list_size);
1871 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1872 &white_list_fops);
1873 debugfs_create_file("identity_resolving_keys", 0400,
1874 hdev->debugfs, hdev,
1875 &identity_resolving_keys_fops);
1876 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1877 hdev, &long_term_keys_fops);
1878 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1879 hdev, &conn_min_interval_fops);
1880 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881 hdev, &conn_max_interval_fops);
1882 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883 hdev, &adv_channel_map_fops);
1884 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1885 &lowpan_debugfs_fops);
1886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887 &le_auto_conn_fops);
1888 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout);
1893 return 0;
1896 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1898 __u8 scan = opt;
1900 BT_DBG("%s %x", req->hdev->name, scan);
1902 /* Inquiry and Page scans */
1903 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1906 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1908 __u8 auth = opt;
1910 BT_DBG("%s %x", req->hdev->name, auth);
1912 /* Authentication */
1913 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1916 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1918 __u8 encrypt = opt;
1920 BT_DBG("%s %x", req->hdev->name, encrypt);
1922 /* Encryption */
1923 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1926 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1928 __le16 policy = cpu_to_le16(opt);
1930 BT_DBG("%s %x", req->hdev->name, policy);
1932 /* Default link policy */
1933 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1936 /* Get HCI device by index.
1937 * Device is held on return. */
1938 struct hci_dev *hci_dev_get(int index)
1940 struct hci_dev *hdev = NULL, *d;
1942 BT_DBG("%d", index);
1944 if (index < 0)
1945 return NULL;
1947 read_lock(&hci_dev_list_lock);
1948 list_for_each_entry(d, &hci_dev_list, list) {
1949 if (d->id == index) {
1950 hdev = hci_dev_hold(d);
1951 break;
1954 read_unlock(&hci_dev_list_lock);
1955 return hdev;
1958 /* ---- Inquiry support ---- */
1960 bool hci_discovery_active(struct hci_dev *hdev)
1962 struct discovery_state *discov = &hdev->discovery;
1964 switch (discov->state) {
1965 case DISCOVERY_FINDING:
1966 case DISCOVERY_RESOLVING:
1967 return true;
1969 default:
1970 return false;
1974 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1976 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1978 if (hdev->discovery.state == state)
1979 return;
1981 switch (state) {
1982 case DISCOVERY_STOPPED:
1983 hci_update_background_scan(hdev);
1985 if (hdev->discovery.state != DISCOVERY_STARTING)
1986 mgmt_discovering(hdev, 0);
1987 break;
1988 case DISCOVERY_STARTING:
1989 break;
1990 case DISCOVERY_FINDING:
1991 mgmt_discovering(hdev, 1);
1992 break;
1993 case DISCOVERY_RESOLVING:
1994 break;
1995 case DISCOVERY_STOPPING:
1996 break;
1999 hdev->discovery.state = state;
2002 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2004 struct discovery_state *cache = &hdev->discovery;
2005 struct inquiry_entry *p, *n;
2007 list_for_each_entry_safe(p, n, &cache->all, all) {
2008 list_del(&p->all);
2009 kfree(p);
2012 INIT_LIST_HEAD(&cache->unknown);
2013 INIT_LIST_HEAD(&cache->resolve);
2016 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2017 bdaddr_t *bdaddr)
2019 struct discovery_state *cache = &hdev->discovery;
2020 struct inquiry_entry *e;
2022 BT_DBG("cache %p, %pMR", cache, bdaddr);
2024 list_for_each_entry(e, &cache->all, all) {
2025 if (!bacmp(&e->data.bdaddr, bdaddr))
2026 return e;
2029 return NULL;
2032 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2033 bdaddr_t *bdaddr)
2035 struct discovery_state *cache = &hdev->discovery;
2036 struct inquiry_entry *e;
2038 BT_DBG("cache %p, %pMR", cache, bdaddr);
2040 list_for_each_entry(e, &cache->unknown, list) {
2041 if (!bacmp(&e->data.bdaddr, bdaddr))
2042 return e;
2045 return NULL;
2048 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2049 bdaddr_t *bdaddr,
2050 int state)
2052 struct discovery_state *cache = &hdev->discovery;
2053 struct inquiry_entry *e;
2055 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2057 list_for_each_entry(e, &cache->resolve, list) {
2058 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2059 return e;
2060 if (!bacmp(&e->data.bdaddr, bdaddr))
2061 return e;
2064 return NULL;
2067 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2068 struct inquiry_entry *ie)
2070 struct discovery_state *cache = &hdev->discovery;
2071 struct list_head *pos = &cache->resolve;
2072 struct inquiry_entry *p;
2074 list_del(&ie->list);
2076 list_for_each_entry(p, &cache->resolve, list) {
2077 if (p->name_state != NAME_PENDING &&
2078 abs(p->data.rssi) >= abs(ie->data.rssi))
2079 break;
2080 pos = &p->list;
2083 list_add(&ie->list, pos);
2086 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2087 bool name_known, bool *ssp)
2089 struct discovery_state *cache = &hdev->discovery;
2090 struct inquiry_entry *ie;
2092 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2094 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2096 *ssp = data->ssp_mode;
2098 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2099 if (ie) {
2100 if (ie->data.ssp_mode)
2101 *ssp = true;
2103 if (ie->name_state == NAME_NEEDED &&
2104 data->rssi != ie->data.rssi) {
2105 ie->data.rssi = data->rssi;
2106 hci_inquiry_cache_update_resolve(hdev, ie);
2109 goto update;
2112 /* Entry not in the cache. Add new one. */
2113 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114 if (!ie)
2115 return false;
2117 list_add(&ie->all, &cache->all);
2119 if (name_known) {
2120 ie->name_state = NAME_KNOWN;
2121 } else {
2122 ie->name_state = NAME_NOT_KNOWN;
2123 list_add(&ie->list, &cache->unknown);
2126 update:
2127 if (name_known && ie->name_state != NAME_KNOWN &&
2128 ie->name_state != NAME_PENDING) {
2129 ie->name_state = NAME_KNOWN;
2130 list_del(&ie->list);
2133 memcpy(&ie->data, data, sizeof(*data));
2134 ie->timestamp = jiffies;
2135 cache->timestamp = jiffies;
2137 if (ie->name_state == NAME_NOT_KNOWN)
2138 return false;
2140 return true;
2143 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2145 struct discovery_state *cache = &hdev->discovery;
2146 struct inquiry_info *info = (struct inquiry_info *) buf;
2147 struct inquiry_entry *e;
2148 int copied = 0;
2150 list_for_each_entry(e, &cache->all, all) {
2151 struct inquiry_data *data = &e->data;
2153 if (copied >= num)
2154 break;
2156 bacpy(&info->bdaddr, &data->bdaddr);
2157 info->pscan_rep_mode = data->pscan_rep_mode;
2158 info->pscan_period_mode = data->pscan_period_mode;
2159 info->pscan_mode = data->pscan_mode;
2160 memcpy(info->dev_class, data->dev_class, 3);
2161 info->clock_offset = data->clock_offset;
2163 info++;
2164 copied++;
2167 BT_DBG("cache %p, copied %d", cache, copied);
2168 return copied;
2171 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2173 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2174 struct hci_dev *hdev = req->hdev;
2175 struct hci_cp_inquiry cp;
2177 BT_DBG("%s", hdev->name);
2179 if (test_bit(HCI_INQUIRY, &hdev->flags))
2180 return;
2182 /* Start Inquiry */
2183 memcpy(&cp.lap, &ir->lap, 3);
2184 cp.length = ir->length;
2185 cp.num_rsp = ir->num_rsp;
2186 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2189 static int wait_inquiry(void *word)
2191 schedule();
2192 return signal_pending(current);
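/* hci_inquiry() backs the HCIINQUIRY ioctl on HCI sockets: it copies in a
 * struct hci_inquiry_req, runs a classic inquiry if the cache is stale or a
 * flush was requested, and then copies the cached inquiry_info entries back
 * to user space (capped at 255 responses when ir.num_rsp is 0).
 */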
2195 int hci_inquiry(void __user *arg)
2197 __u8 __user *ptr = arg;
2198 struct hci_inquiry_req ir;
2199 struct hci_dev *hdev;
2200 int err = 0, do_inquiry = 0, max_rsp;
2201 long timeo;
2202 __u8 *buf;
2204 if (copy_from_user(&ir, ptr, sizeof(ir)))
2205 return -EFAULT;
2207 hdev = hci_dev_get(ir.dev_id);
2208 if (!hdev)
2209 return -ENODEV;
2211 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2212 err = -EBUSY;
2213 goto done;
2216 if (hdev->dev_type != HCI_BREDR) {
2217 err = -EOPNOTSUPP;
2218 goto done;
2221 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2222 err = -EOPNOTSUPP;
2223 goto done;
2226 hci_dev_lock(hdev);
2227 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2228 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2229 hci_inquiry_cache_flush(hdev);
2230 do_inquiry = 1;
2232 hci_dev_unlock(hdev);
2234 timeo = ir.length * msecs_to_jiffies(2000);
2236 if (do_inquiry) {
2237 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2238 timeo);
2239 if (err < 0)
2240 goto done;
2242 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2243 * cleared). If it is interrupted by a signal, return -EINTR.
2245 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2246 TASK_INTERRUPTIBLE))
2247 return -EINTR;
2250 /* for an unlimited number of responses we will use a buffer with
2251 * 255 entries
2253 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2255 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2256 * copy it to user space.
2258 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2259 if (!buf) {
2260 err = -ENOMEM;
2261 goto done;
2264 hci_dev_lock(hdev);
2265 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2266 hci_dev_unlock(hdev);
2268 BT_DBG("num_rsp %d", ir.num_rsp);
2270 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2271 ptr += sizeof(ir);
2272 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2273 ir.num_rsp))
2274 err = -EFAULT;
2275 } else
2276 err = -EFAULT;
2278 kfree(buf);
2280 done:
2281 hci_dev_put(hdev);
2282 return err;
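/* Usage note (illustrative, not part of the original source): the buffer
 * handed in through arg is expected to start with a struct hci_inquiry_req
 * and to leave room behind it for up to ir.num_rsp struct inquiry_info
 * entries (255 entries when num_rsp is 0). The request header is copied
 * back to user space with the actual number of responses filled in.
 */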
2285 static int hci_dev_do_open(struct hci_dev *hdev)
2287 int ret = 0;
2289 BT_DBG("%s %p", hdev->name, hdev);
2291 hci_req_lock(hdev);
2293 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2294 ret = -ENODEV;
2295 goto done;
2298 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2299 /* Check for rfkill but allow the HCI setup stage to
2300 * proceed (which in itself doesn't cause any RF activity).
2302 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2303 ret = -ERFKILL;
2304 goto done;
2307 /* Check for valid public address or a configured static
2308 * random address, but let the HCI setup proceed to
2309 * be able to determine if there is a public address
2310 * or not.
2312 * In case of user channel usage, it is not important
2313 * if a public address or static random address is
2314 * available.
2316 * This check is only valid for BR/EDR controllers
2317 * since AMP controllers do not have an address.
2319 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2320 hdev->dev_type == HCI_BREDR &&
2321 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2322 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2323 ret = -EADDRNOTAVAIL;
2324 goto done;
2328 if (test_bit(HCI_UP, &hdev->flags)) {
2329 ret = -EALREADY;
2330 goto done;
2333 if (hdev->open(hdev)) {
2334 ret = -EIO;
2335 goto done;
2338 atomic_set(&hdev->cmd_cnt, 1);
2339 set_bit(HCI_INIT, &hdev->flags);
2341 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2342 ret = hdev->setup(hdev);
2344 if (!ret) {
2345 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2346 set_bit(HCI_RAW, &hdev->flags);
2348 if (!test_bit(HCI_RAW, &hdev->flags) &&
2349 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2350 ret = __hci_init(hdev);
2353 clear_bit(HCI_INIT, &hdev->flags);
2355 if (!ret) {
2356 hci_dev_hold(hdev);
2357 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2358 set_bit(HCI_UP, &hdev->flags);
2359 hci_notify(hdev, HCI_DEV_UP);
2360 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2361 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2362 hdev->dev_type == HCI_BREDR) {
2363 hci_dev_lock(hdev);
2364 mgmt_powered(hdev, 1);
2365 hci_dev_unlock(hdev);
2367 } else {
2368 /* Init failed, cleanup */
2369 flush_work(&hdev->tx_work);
2370 flush_work(&hdev->cmd_work);
2371 flush_work(&hdev->rx_work);
2373 skb_queue_purge(&hdev->cmd_q);
2374 skb_queue_purge(&hdev->rx_q);
2376 if (hdev->flush)
2377 hdev->flush(hdev);
2379 if (hdev->sent_cmd) {
2380 kfree_skb(hdev->sent_cmd);
2381 hdev->sent_cmd = NULL;
2384 hdev->close(hdev);
2385 hdev->flags = 0;
2388 done:
2389 hci_req_unlock(hdev);
2390 return ret;
2393 /* ---- HCI ioctl helpers ---- */
2395 int hci_dev_open(__u16 dev)
2397 struct hci_dev *hdev;
2398 int err;
2400 hdev = hci_dev_get(dev);
2401 if (!hdev)
2402 return -ENODEV;
2404 /* We need to ensure that no other power on/off work is pending
2405 * before proceeding to call hci_dev_do_open. This is
2406 * particularly important if the setup procedure has not yet
2407 * completed.
2409 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2410 cancel_delayed_work(&hdev->power_off);
2412 /* After this call it is guaranteed that the setup procedure
2413 * has finished. This means that error conditions like RFKILL
2414 * or no valid public or static random address apply.
2416 flush_workqueue(hdev->req_workqueue);
2418 err = hci_dev_do_open(hdev);
2420 hci_dev_put(hdev);
2422 return err;
2425 static int hci_dev_do_close(struct hci_dev *hdev)
2427 BT_DBG("%s %p", hdev->name, hdev);
2429 cancel_delayed_work(&hdev->power_off);
2431 hci_req_cancel(hdev, ENODEV);
2432 hci_req_lock(hdev);
2434 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2435 del_timer_sync(&hdev->cmd_timer);
2436 hci_req_unlock(hdev);
2437 return 0;
2440 /* Flush RX and TX works */
2441 flush_work(&hdev->tx_work);
2442 flush_work(&hdev->rx_work);
2444 if (hdev->discov_timeout > 0) {
2445 cancel_delayed_work(&hdev->discov_off);
2446 hdev->discov_timeout = 0;
2447 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2448 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2451 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2452 cancel_delayed_work(&hdev->service_cache);
2454 cancel_delayed_work_sync(&hdev->le_scan_disable);
2456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2457 cancel_delayed_work_sync(&hdev->rpa_expired);
2459 hci_dev_lock(hdev);
2460 hci_inquiry_cache_flush(hdev);
2461 hci_conn_hash_flush(hdev);
2462 hci_pend_le_conns_clear(hdev);
2463 hci_dev_unlock(hdev);
2465 hci_notify(hdev, HCI_DEV_DOWN);
2467 if (hdev->flush)
2468 hdev->flush(hdev);
2470 /* Reset device */
2471 skb_queue_purge(&hdev->cmd_q);
2472 atomic_set(&hdev->cmd_cnt, 1);
2473 if (!test_bit(HCI_RAW, &hdev->flags) &&
2474 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2475 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2476 set_bit(HCI_INIT, &hdev->flags);
2477 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2478 clear_bit(HCI_INIT, &hdev->flags);
2481 /* flush cmd work */
2482 flush_work(&hdev->cmd_work);
2484 /* Drop queues */
2485 skb_queue_purge(&hdev->rx_q);
2486 skb_queue_purge(&hdev->cmd_q);
2487 skb_queue_purge(&hdev->raw_q);
2489 /* Drop last sent command */
2490 if (hdev->sent_cmd) {
2491 del_timer_sync(&hdev->cmd_timer);
2492 kfree_skb(hdev->sent_cmd);
2493 hdev->sent_cmd = NULL;
2496 kfree_skb(hdev->recv_evt);
2497 hdev->recv_evt = NULL;
2499 /* After this point our queues are empty
2500 * and no tasks are scheduled. */
2501 hdev->close(hdev);
2503 /* Clear flags */
2504 hdev->flags = 0;
2505 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2507 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2508 if (hdev->dev_type == HCI_BREDR) {
2509 hci_dev_lock(hdev);
2510 mgmt_powered(hdev, 0);
2511 hci_dev_unlock(hdev);
2515 /* Controller radio is available but is currently powered down */
2516 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2518 memset(hdev->eir, 0, sizeof(hdev->eir));
2519 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2520 bacpy(&hdev->random_addr, BDADDR_ANY);
2522 hci_req_unlock(hdev);
2524 hci_dev_put(hdev);
2525 return 0;
2528 int hci_dev_close(__u16 dev)
2530 struct hci_dev *hdev;
2531 int err;
2533 hdev = hci_dev_get(dev);
2534 if (!hdev)
2535 return -ENODEV;
2537 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2538 err = -EBUSY;
2539 goto done;
2542 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2543 cancel_delayed_work(&hdev->power_off);
2545 err = hci_dev_do_close(hdev);
2547 done:
2548 hci_dev_put(hdev);
2549 return err;
2552 int hci_dev_reset(__u16 dev)
2554 struct hci_dev *hdev;
2555 int ret = 0;
2557 hdev = hci_dev_get(dev);
2558 if (!hdev)
2559 return -ENODEV;
2561 hci_req_lock(hdev);
2563 if (!test_bit(HCI_UP, &hdev->flags)) {
2564 ret = -ENETDOWN;
2565 goto done;
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569 ret = -EBUSY;
2570 goto done;
2573 /* Drop queues */
2574 skb_queue_purge(&hdev->rx_q);
2575 skb_queue_purge(&hdev->cmd_q);
2577 hci_dev_lock(hdev);
2578 hci_inquiry_cache_flush(hdev);
2579 hci_conn_hash_flush(hdev);
2580 hci_dev_unlock(hdev);
2582 if (hdev->flush)
2583 hdev->flush(hdev);
2585 atomic_set(&hdev->cmd_cnt, 1);
2586 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2588 if (!test_bit(HCI_RAW, &hdev->flags))
2589 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2591 done:
2592 hci_req_unlock(hdev);
2593 hci_dev_put(hdev);
2594 return ret;
2597 int hci_dev_reset_stat(__u16 dev)
2599 struct hci_dev *hdev;
2600 int ret = 0;
2602 hdev = hci_dev_get(dev);
2603 if (!hdev)
2604 return -ENODEV;
2606 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2607 ret = -EBUSY;
2608 goto done;
2611 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2613 done:
2614 hci_dev_put(hdev);
2615 return ret;
2618 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2620 struct hci_dev *hdev;
2621 struct hci_dev_req dr;
2622 int err = 0;
2624 if (copy_from_user(&dr, arg, sizeof(dr)))
2625 return -EFAULT;
2627 hdev = hci_dev_get(dr.dev_id);
2628 if (!hdev)
2629 return -ENODEV;
2631 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2632 err = -EBUSY;
2633 goto done;
2636 if (hdev->dev_type != HCI_BREDR) {
2637 err = -EOPNOTSUPP;
2638 goto done;
2641 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2642 err = -EOPNOTSUPP;
2643 goto done;
2646 switch (cmd) {
2647 case HCISETAUTH:
2648 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2649 HCI_INIT_TIMEOUT);
2650 break;
2652 case HCISETENCRYPT:
2653 if (!lmp_encrypt_capable(hdev)) {
2654 err = -EOPNOTSUPP;
2655 break;
2658 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2659 /* Auth must be enabled first */
2660 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2661 HCI_INIT_TIMEOUT);
2662 if (err)
2663 break;
2666 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2667 HCI_INIT_TIMEOUT);
2668 break;
2670 case HCISETSCAN:
2671 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2672 HCI_INIT_TIMEOUT);
2673 break;
2675 case HCISETLINKPOL:
2676 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2677 HCI_INIT_TIMEOUT);
2678 break;
2680 case HCISETLINKMODE:
2681 hdev->link_mode = ((__u16) dr.dev_opt) &
2682 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2683 break;
2685 case HCISETPTYPE:
2686 hdev->pkt_type = (__u16) dr.dev_opt;
2687 break;
2689 case HCISETACLMTU:
2690 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2691 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2692 break;
2694 case HCISETSCOMTU:
2695 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2696 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2697 break;
2699 default:
2700 err = -EINVAL;
2701 break;
2704 done:
2705 hci_dev_put(hdev);
2706 return err;
2709 int hci_get_dev_list(void __user *arg)
2711 struct hci_dev *hdev;
2712 struct hci_dev_list_req *dl;
2713 struct hci_dev_req *dr;
2714 int n = 0, size, err;
2715 __u16 dev_num;
2717 if (get_user(dev_num, (__u16 __user *) arg))
2718 return -EFAULT;
2720 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2721 return -EINVAL;
2723 size = sizeof(*dl) + dev_num * sizeof(*dr);
2725 dl = kzalloc(size, GFP_KERNEL);
2726 if (!dl)
2727 return -ENOMEM;
2729 dr = dl->dev_req;
2731 read_lock(&hci_dev_list_lock);
2732 list_for_each_entry(hdev, &hci_dev_list, list) {
2733 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2734 cancel_delayed_work(&hdev->power_off);
2736 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2737 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2739 (dr + n)->dev_id = hdev->id;
2740 (dr + n)->dev_opt = hdev->flags;
2742 if (++n >= dev_num)
2743 break;
2745 read_unlock(&hci_dev_list_lock);
2747 dl->dev_num = n;
2748 size = sizeof(*dl) + n * sizeof(*dr);
2750 err = copy_to_user(arg, dl, size);
2751 kfree(dl);
2753 return err ? -EFAULT : 0;
2756 int hci_get_dev_info(void __user *arg)
2758 struct hci_dev *hdev;
2759 struct hci_dev_info di;
2760 int err = 0;
2762 if (copy_from_user(&di, arg, sizeof(di)))
2763 return -EFAULT;
2765 hdev = hci_dev_get(di.dev_id);
2766 if (!hdev)
2767 return -ENODEV;
2769 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2770 cancel_delayed_work_sync(&hdev->power_off);
2772 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2773 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2775 strcpy(di.name, hdev->name);
2776 di.bdaddr = hdev->bdaddr;
2777 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2778 di.flags = hdev->flags;
2779 di.pkt_type = hdev->pkt_type;
2780 if (lmp_bredr_capable(hdev)) {
2781 di.acl_mtu = hdev->acl_mtu;
2782 di.acl_pkts = hdev->acl_pkts;
2783 di.sco_mtu = hdev->sco_mtu;
2784 di.sco_pkts = hdev->sco_pkts;
2785 } else {
2786 di.acl_mtu = hdev->le_mtu;
2787 di.acl_pkts = hdev->le_pkts;
2788 di.sco_mtu = 0;
2789 di.sco_pkts = 0;
2791 di.link_policy = hdev->link_policy;
2792 di.link_mode = hdev->link_mode;
2794 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2795 memcpy(&di.features, &hdev->features, sizeof(di.features));
2797 if (copy_to_user(arg, &di, sizeof(di)))
2798 err = -EFAULT;
2800 hci_dev_put(hdev);
2802 return err;
2805 /* ---- Interface to HCI drivers ---- */
2807 static int hci_rfkill_set_block(void *data, bool blocked)
2809 struct hci_dev *hdev = data;
2811 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2813 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2814 return -EBUSY;
2816 if (blocked) {
2817 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2818 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2819 hci_dev_do_close(hdev);
2820 } else {
2821 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2824 return 0;
2827 static const struct rfkill_ops hci_rfkill_ops = {
2828 .set_block = hci_rfkill_set_block,
2831 static void hci_power_on(struct work_struct *work)
2833 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2834 int err;
2836 BT_DBG("%s", hdev->name);
2838 err = hci_dev_do_open(hdev);
2839 if (err < 0) {
2840 mgmt_set_powered_failed(hdev, err);
2841 return;
2844 /* During the HCI setup phase, a few error conditions are
2845 * ignored and they need to be checked now. If they are still
2846 * valid, it is important to turn the device back off.
2848 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2849 (hdev->dev_type == HCI_BREDR &&
2850 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2851 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2852 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2853 hci_dev_do_close(hdev);
2854 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2855 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2856 HCI_AUTO_OFF_TIMEOUT);
2859 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2860 mgmt_index_added(hdev);
2863 static void hci_power_off(struct work_struct *work)
2865 struct hci_dev *hdev = container_of(work, struct hci_dev,
2866 power_off.work);
2868 BT_DBG("%s", hdev->name);
2870 hci_dev_do_close(hdev);
2873 static void hci_discov_off(struct work_struct *work)
2875 struct hci_dev *hdev;
2877 hdev = container_of(work, struct hci_dev, discov_off.work);
2879 BT_DBG("%s", hdev->name);
2881 mgmt_discoverable_timeout(hdev);
2884 void hci_uuids_clear(struct hci_dev *hdev)
2886 struct bt_uuid *uuid, *tmp;
2888 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2889 list_del(&uuid->list);
2890 kfree(uuid);
2894 void hci_link_keys_clear(struct hci_dev *hdev)
2896 struct list_head *p, *n;
2898 list_for_each_safe(p, n, &hdev->link_keys) {
2899 struct link_key *key;
2901 key = list_entry(p, struct link_key, list);
2903 list_del(p);
2904 kfree(key);
2908 void hci_smp_ltks_clear(struct hci_dev *hdev)
2910 struct smp_ltk *k, *tmp;
2912 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2913 list_del(&k->list);
2914 kfree(k);
2918 void hci_smp_irks_clear(struct hci_dev *hdev)
2920 struct smp_irk *k, *tmp;
2922 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2923 list_del(&k->list);
2924 kfree(k);
2928 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2930 struct link_key *k;
2932 list_for_each_entry(k, &hdev->link_keys, list)
2933 if (bacmp(bdaddr, &k->bdaddr) == 0)
2934 return k;
2936 return NULL;
2939 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2940 u8 key_type, u8 old_key_type)
2942 /* Legacy key */
2943 if (key_type < 0x03)
2944 return true;
2946 /* Debug keys are insecure so don't store them persistently */
2947 if (key_type == HCI_LK_DEBUG_COMBINATION)
2948 return false;
2950 /* Changed combination key and there's no previous one */
2951 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2952 return false;
2954 /* Security mode 3 case */
2955 if (!conn)
2956 return true;
2958 /* Neither the local nor the remote side requested no-bonding */
2959 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2960 return true;
2962 /* Local side had dedicated bonding as requirement */
2963 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2964 return true;
2966 /* Remote side had dedicated bonding as requirement */
2967 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2968 return true;
2970 /* If none of the above criteria match, then don't store the key
2971 * persistently */
2972 return false;
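/* Worked example (illustrative, based only on the checks above): legacy key
 * types (0x00-0x02) are always treated as persistent and a debug combination
 * key never is; for the remaining types the key is kept, for instance, when
 * either side asked for dedicated bonding (auth_type/remote_auth 0x02 or
 * 0x03) or when neither side used no-bonding as its requirement.
 */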
2975 static bool ltk_type_master(u8 type)
2977 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2978 return true;
2980 return false;
2983 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2984 bool master)
2986 struct smp_ltk *k;
2988 list_for_each_entry(k, &hdev->long_term_keys, list) {
2989 if (k->ediv != ediv || k->rand != rand)
2990 continue;
2992 if (ltk_type_master(k->type) != master)
2993 continue;
2995 return k;
2998 return NULL;
3001 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3002 u8 addr_type, bool master)
3004 struct smp_ltk *k;
3006 list_for_each_entry(k, &hdev->long_term_keys, list)
3007 if (addr_type == k->bdaddr_type &&
3008 bacmp(bdaddr, &k->bdaddr) == 0 &&
3009 ltk_type_master(k->type) == master)
3010 return k;
3012 return NULL;
3015 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3017 struct smp_irk *irk;
3019 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3020 if (!bacmp(&irk->rpa, rpa))
3021 return irk;
3024 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3025 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3026 bacpy(&irk->rpa, rpa);
3027 return irk;
3031 return NULL;
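/* Example (illustrative): a peer using a fresh resolvable private address is
 * first matched against the cached rpa of each stored IRK; if that misses,
 * every IRK is tried with smp_irk_matches() and, on a hit, the new RPA is
 * cached in the matching entry before it is returned.
 */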
3034 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3035 u8 addr_type)
3037 struct smp_irk *irk;
3039 /* Identity Address must be public or static random */
3040 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3041 return NULL;
3043 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3044 if (addr_type == irk->addr_type &&
3045 bacmp(bdaddr, &irk->bdaddr) == 0)
3046 return irk;
3049 return NULL;
3052 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3053 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
3055 struct link_key *key, *old_key;
3056 u8 old_key_type;
3057 bool persistent;
3059 old_key = hci_find_link_key(hdev, bdaddr);
3060 if (old_key) {
3061 old_key_type = old_key->type;
3062 key = old_key;
3063 } else {
3064 old_key_type = conn ? conn->key_type : 0xff;
3065 key = kzalloc(sizeof(*key), GFP_KERNEL);
3066 if (!key)
3067 return -ENOMEM;
3068 list_add(&key->list, &hdev->link_keys);
3071 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3073 /* Some buggy controller combinations generate a changed
3074 * combination key for legacy pairing even when there's no
3075 * previous key */
3076 if (type == HCI_LK_CHANGED_COMBINATION &&
3077 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3078 type = HCI_LK_COMBINATION;
3079 if (conn)
3080 conn->key_type = type;
3083 bacpy(&key->bdaddr, bdaddr);
3084 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3085 key->pin_len = pin_len;
3087 if (type == HCI_LK_CHANGED_COMBINATION)
3088 key->type = old_key_type;
3089 else
3090 key->type = type;
3092 if (!new_key)
3093 return 0;
3095 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3097 mgmt_new_link_key(hdev, key, persistent);
3099 if (conn)
3100 conn->flush_key = !persistent;
3102 return 0;
3105 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3106 u8 addr_type, u8 type, u8 authenticated,
3107 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3109 struct smp_ltk *key, *old_key;
3110 bool master = ltk_type_master(type);
3112 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3113 if (old_key)
3114 key = old_key;
3115 else {
3116 key = kzalloc(sizeof(*key), GFP_KERNEL);
3117 if (!key)
3118 return NULL;
3119 list_add(&key->list, &hdev->long_term_keys);
3122 bacpy(&key->bdaddr, bdaddr);
3123 key->bdaddr_type = addr_type;
3124 memcpy(key->val, tk, sizeof(key->val));
3125 key->authenticated = authenticated;
3126 key->ediv = ediv;
3127 key->rand = rand;
3128 key->enc_size = enc_size;
3129 key->type = type;
3131 return key;
3134 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3135 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3137 struct smp_irk *irk;
3139 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3140 if (!irk) {
3141 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3142 if (!irk)
3143 return NULL;
3145 bacpy(&irk->bdaddr, bdaddr);
3146 irk->addr_type = addr_type;
3148 list_add(&irk->list, &hdev->identity_resolving_keys);
3151 memcpy(irk->val, val, 16);
3152 bacpy(&irk->rpa, rpa);
3154 return irk;
3157 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3159 struct link_key *key;
3161 key = hci_find_link_key(hdev, bdaddr);
3162 if (!key)
3163 return -ENOENT;
3165 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3167 list_del(&key->list);
3168 kfree(key);
3170 return 0;
3173 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3175 struct smp_ltk *k, *tmp;
3176 int removed = 0;
3178 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3179 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3180 continue;
3182 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3184 list_del(&k->list);
3185 kfree(k);
3186 removed++;
3189 return removed ? 0 : -ENOENT;
3192 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3194 struct smp_irk *k, *tmp;
3196 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3197 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3198 continue;
3200 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3202 list_del(&k->list);
3203 kfree(k);
3207 /* HCI command timer function */
3208 static void hci_cmd_timeout(unsigned long arg)
3210 struct hci_dev *hdev = (void *) arg;
3212 if (hdev->sent_cmd) {
3213 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3214 u16 opcode = __le16_to_cpu(sent->opcode);
3216 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3217 } else {
3218 BT_ERR("%s command tx timeout", hdev->name);
3221 atomic_set(&hdev->cmd_cnt, 1);
3222 queue_work(hdev->workqueue, &hdev->cmd_work);
3225 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3226 bdaddr_t *bdaddr)
3228 struct oob_data *data;
3230 list_for_each_entry(data, &hdev->remote_oob_data, list)
3231 if (bacmp(bdaddr, &data->bdaddr) == 0)
3232 return data;
3234 return NULL;
3237 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3239 struct oob_data *data;
3241 data = hci_find_remote_oob_data(hdev, bdaddr);
3242 if (!data)
3243 return -ENOENT;
3245 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3247 list_del(&data->list);
3248 kfree(data);
3250 return 0;
3253 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3255 struct oob_data *data, *n;
3257 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3258 list_del(&data->list);
3259 kfree(data);
3263 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3264 u8 *hash, u8 *randomizer)
3266 struct oob_data *data;
3268 data = hci_find_remote_oob_data(hdev, bdaddr);
3269 if (!data) {
3270 data = kmalloc(sizeof(*data), GFP_KERNEL);
3271 if (!data)
3272 return -ENOMEM;
3274 bacpy(&data->bdaddr, bdaddr);
3275 list_add(&data->list, &hdev->remote_oob_data);
3278 memcpy(data->hash192, hash, sizeof(data->hash192));
3279 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3281 memset(data->hash256, 0, sizeof(data->hash256));
3282 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3284 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3286 return 0;
3289 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3290 u8 *hash192, u8 *randomizer192,
3291 u8 *hash256, u8 *randomizer256)
3293 struct oob_data *data;
3295 data = hci_find_remote_oob_data(hdev, bdaddr);
3296 if (!data) {
3297 data = kmalloc(sizeof(*data), GFP_KERNEL);
3298 if (!data)
3299 return -ENOMEM;
3301 bacpy(&data->bdaddr, bdaddr);
3302 list_add(&data->list, &hdev->remote_oob_data);
3305 memcpy(data->hash192, hash192, sizeof(data->hash192));
3306 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3308 memcpy(data->hash256, hash256, sizeof(data->hash256));
3309 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3311 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3313 return 0;
3316 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3317 bdaddr_t *bdaddr, u8 type)
3319 struct bdaddr_list *b;
3321 list_for_each_entry(b, &hdev->blacklist, list) {
3322 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3323 return b;
3326 return NULL;
3329 static void hci_blacklist_clear(struct hci_dev *hdev)
3331 struct list_head *p, *n;
3333 list_for_each_safe(p, n, &hdev->blacklist) {
3334 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3336 list_del(p);
3337 kfree(b);
3341 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3343 struct bdaddr_list *entry;
3345 if (!bacmp(bdaddr, BDADDR_ANY))
3346 return -EBADF;
3348 if (hci_blacklist_lookup(hdev, bdaddr, type))
3349 return -EEXIST;
3351 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3352 if (!entry)
3353 return -ENOMEM;
3355 bacpy(&entry->bdaddr, bdaddr);
3356 entry->bdaddr_type = type;
3358 list_add(&entry->list, &hdev->blacklist);
3360 return mgmt_device_blocked(hdev, bdaddr, type);
3363 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3365 struct bdaddr_list *entry;
3367 if (!bacmp(bdaddr, BDADDR_ANY)) {
3368 hci_blacklist_clear(hdev);
3369 return 0;
3372 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3373 if (!entry)
3374 return -ENOENT;
3376 list_del(&entry->list);
3377 kfree(entry);
3379 return mgmt_device_unblocked(hdev, bdaddr, type);
3382 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3383 bdaddr_t *bdaddr, u8 type)
3385 struct bdaddr_list *b;
3387 list_for_each_entry(b, &hdev->le_white_list, list) {
3388 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3389 return b;
3392 return NULL;
3395 void hci_white_list_clear(struct hci_dev *hdev)
3397 struct list_head *p, *n;
3399 list_for_each_safe(p, n, &hdev->le_white_list) {
3400 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3402 list_del(p);
3403 kfree(b);
3407 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3409 struct bdaddr_list *entry;
3411 if (!bacmp(bdaddr, BDADDR_ANY))
3412 return -EBADF;
3414 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3415 if (!entry)
3416 return -ENOMEM;
3418 bacpy(&entry->bdaddr, bdaddr);
3419 entry->bdaddr_type = type;
3421 list_add(&entry->list, &hdev->le_white_list);
3423 return 0;
3426 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3428 struct bdaddr_list *entry;
3430 if (!bacmp(bdaddr, BDADDR_ANY))
3431 return -EBADF;
3433 entry = hci_white_list_lookup(hdev, bdaddr, type);
3434 if (!entry)
3435 return -ENOENT;
3437 list_del(&entry->list);
3438 kfree(entry);
3440 return 0;
3443 /* This function requires the caller holds hdev->lock */
3444 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3445 bdaddr_t *addr, u8 addr_type)
3447 struct hci_conn_params *params;
3449 list_for_each_entry(params, &hdev->le_conn_params, list) {
3450 if (bacmp(&params->addr, addr) == 0 &&
3451 params->addr_type == addr_type) {
3452 return params;
3456 return NULL;
3459 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3461 struct hci_conn *conn;
3463 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3464 if (!conn)
3465 return false;
3467 if (conn->dst_type != type)
3468 return false;
3470 if (conn->state != BT_CONNECTED)
3471 return false;
3473 return true;
3476 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3478 if (addr_type == ADDR_LE_DEV_PUBLIC)
3479 return true;
3481 /* Check for Random Static address type */
3482 if ((addr->b[5] & 0xc0) == 0xc0)
3483 return true;
3485 return false;
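/* Example (illustrative): a random address whose two most significant bits
 * are 11 (e.g. c0:12:34:56:78:9a) is a static address and qualifies as an
 * identity address here, whereas a resolvable private address (top bits 01)
 * does not.
 */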
3488 /* This function requires the caller holds hdev->lock */
3489 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3490 u8 auto_connect, u16 conn_min_interval,
3491 u16 conn_max_interval)
3493 struct hci_conn_params *params;
3495 if (!is_identity_address(addr, addr_type))
3496 return -EINVAL;
3498 params = hci_conn_params_lookup(hdev, addr, addr_type);
3499 if (params)
3500 goto update;
3502 params = kzalloc(sizeof(*params), GFP_KERNEL);
3503 if (!params) {
3504 BT_ERR("Out of memory");
3505 return -ENOMEM;
3508 bacpy(&params->addr, addr);
3509 params->addr_type = addr_type;
3511 list_add(&params->list, &hdev->le_conn_params);
3513 update:
3514 params->conn_min_interval = conn_min_interval;
3515 params->conn_max_interval = conn_max_interval;
3516 params->auto_connect = auto_connect;
3518 switch (auto_connect) {
3519 case HCI_AUTO_CONN_DISABLED:
3520 case HCI_AUTO_CONN_LINK_LOSS:
3521 hci_pend_le_conn_del(hdev, addr, addr_type);
3522 break;
3523 case HCI_AUTO_CONN_ALWAYS:
3524 if (!is_connected(hdev, addr, addr_type))
3525 hci_pend_le_conn_add(hdev, addr, addr_type);
3526 break;
3529 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3530 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3531 conn_min_interval, conn_max_interval);
3533 return 0;
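/* Usage sketch (illustrative, not part of the original source): a caller
 * holding hdev->lock could register a peer for automatic reconnection; the
 * interval values below are arbitrary example numbers.
 *
 *	hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 */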
3536 /* This function requires the caller holds hdev->lock */
3537 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3539 struct hci_conn_params *params;
3541 params = hci_conn_params_lookup(hdev, addr, addr_type);
3542 if (!params)
3543 return;
3545 hci_pend_le_conn_del(hdev, addr, addr_type);
3547 list_del(&params->list);
3548 kfree(params);
3550 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3553 /* This function requires the caller holds hdev->lock */
3554 void hci_conn_params_clear(struct hci_dev *hdev)
3556 struct hci_conn_params *params, *tmp;
3558 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3559 list_del(&params->list);
3560 kfree(params);
3563 BT_DBG("All LE connection parameters were removed");
3566 /* This function requires the caller holds hdev->lock */
3567 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3568 bdaddr_t *addr, u8 addr_type)
3570 struct bdaddr_list *entry;
3572 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3573 if (bacmp(&entry->bdaddr, addr) == 0 &&
3574 entry->bdaddr_type == addr_type)
3575 return entry;
3578 return NULL;
3581 /* This function requires the caller holds hdev->lock */
3582 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3584 struct bdaddr_list *entry;
3586 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3587 if (entry)
3588 goto done;
3590 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3591 if (!entry) {
3592 BT_ERR("Out of memory");
3593 return;
3596 bacpy(&entry->bdaddr, addr);
3597 entry->bdaddr_type = addr_type;
3599 list_add(&entry->list, &hdev->pend_le_conns);
3601 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3603 done:
3604 hci_update_background_scan(hdev);
3607 /* This function requires the caller holds hdev->lock */
3608 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3610 struct bdaddr_list *entry;
3612 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3613 if (!entry)
3614 goto done;
3616 list_del(&entry->list);
3617 kfree(entry);
3619 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3621 done:
3622 hci_update_background_scan(hdev);
3625 /* This function requires the caller holds hdev->lock */
3626 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3628 struct bdaddr_list *entry, *tmp;
3630 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3631 list_del(&entry->list);
3632 kfree(entry);
3635 BT_DBG("All LE pending connections cleared");
3638 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3640 if (status) {
3641 BT_ERR("Failed to start inquiry: status %d", status);
3643 hci_dev_lock(hdev);
3644 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3645 hci_dev_unlock(hdev);
3646 return;
3650 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3652 /* General inquiry access code (GIAC) */
3653 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3654 struct hci_request req;
3655 struct hci_cp_inquiry cp;
3656 int err;
3658 if (status) {
3659 BT_ERR("Failed to disable LE scanning: status %d", status);
3660 return;
3663 switch (hdev->discovery.type) {
3664 case DISCOV_TYPE_LE:
3665 hci_dev_lock(hdev);
3666 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3667 hci_dev_unlock(hdev);
3668 break;
3670 case DISCOV_TYPE_INTERLEAVED:
3671 hci_req_init(&req, hdev);
3673 memset(&cp, 0, sizeof(cp));
3674 memcpy(&cp.lap, lap, sizeof(cp.lap));
3675 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3676 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3678 hci_dev_lock(hdev);
3680 hci_inquiry_cache_flush(hdev);
3682 err = hci_req_run(&req, inquiry_complete);
3683 if (err) {
3684 BT_ERR("Inquiry request failed: err %d", err);
3685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3688 hci_dev_unlock(hdev);
3689 break;
3693 static void le_scan_disable_work(struct work_struct *work)
3695 struct hci_dev *hdev = container_of(work, struct hci_dev,
3696 le_scan_disable.work);
3697 struct hci_request req;
3698 int err;
3700 BT_DBG("%s", hdev->name);
3702 hci_req_init(&req, hdev);
3704 hci_req_add_le_scan_disable(&req);
3706 err = hci_req_run(&req, le_scan_disable_work_complete);
3707 if (err)
3708 BT_ERR("Disable LE scanning request failed: err %d", err);
3711 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3713 struct hci_dev *hdev = req->hdev;
3715 /* If we're advertising or initiating an LE connection we can't
3716 * go ahead and change the random address at this time. This is
3717 * because the eventual initiator address used for the
3718 * subsequently created connection will be undefined (some
3719 * controllers use the new address and others the one we had
3720 * when the operation started).
3722 * In this kind of scenario skip the update and let the random
3723 * address be updated at the next cycle.
3725 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3726 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3727 BT_DBG("Deferring random address update");
3728 return;
3731 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3734 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3735 u8 *own_addr_type)
3737 struct hci_dev *hdev = req->hdev;
3738 int err;
3740 /* If privacy is enabled use a resolvable private address. If the
3741 * current RPA has expired or something other than the current
3742 * RPA is in use, then generate a new one.
3744 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3745 int to;
3747 *own_addr_type = ADDR_LE_DEV_RANDOM;
3749 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3750 !bacmp(&hdev->random_addr, &hdev->rpa))
3751 return 0;
3753 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3754 if (err < 0) {
3755 BT_ERR("%s failed to generate new RPA", hdev->name);
3756 return err;
3759 set_random_addr(req, &hdev->rpa);
3761 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3762 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3764 return 0;
3767 /* In case of required privacy without resolvable private address,
3768 * use an unresolvable private address. This is useful for active
3769 * scanning and non-connectable advertising.
3771 if (require_privacy) {
3772 bdaddr_t urpa;
3774 get_random_bytes(&urpa, 6);
3775 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3777 *own_addr_type = ADDR_LE_DEV_RANDOM;
3778 set_random_addr(req, &urpa);
3779 return 0;
3782 /* If forcing static address is in use or there is no public
3783 * address, use the static address as the random address (but skip
3784 * the HCI command if the current random address is already the
3785 * static one).
3787 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3788 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3789 *own_addr_type = ADDR_LE_DEV_RANDOM;
3790 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3791 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3792 &hdev->static_addr);
3793 return 0;
3796 /* Neither privacy nor static address is being used so use a
3797 * public address.
3799 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3801 return 0;
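/* Summary example (illustrative, based only on the logic above): with
 * HCI_PRIVACY set, a resolvable private address is programmed and the
 * rpa_expired work is re-armed; when privacy is required but HCI_PRIVACY is
 * not set, a freshly generated unresolvable private address is used; with a
 * forced static address or no public address, the static address is
 * programmed; otherwise the public address is used as-is.
 */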
3804 /* Copy the Identity Address of the controller.
3806 * If the controller has a public BD_ADDR, then by default use that one.
3807 * If this is an LE-only controller without a public address, default to
3808 * the static random address.
3810 * For debugging purposes it is possible to force controllers with a
3811 * public address to use the static random address instead.
3813 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3814 u8 *bdaddr_type)
3816 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3817 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3818 bacpy(bdaddr, &hdev->static_addr);
3819 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3820 } else {
3821 bacpy(bdaddr, &hdev->bdaddr);
3822 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3826 /* Alloc HCI device */
3827 struct hci_dev *hci_alloc_dev(void)
3829 struct hci_dev *hdev;
3831 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3832 if (!hdev)
3833 return NULL;
3835 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3836 hdev->esco_type = (ESCO_HV1);
3837 hdev->link_mode = (HCI_LM_ACCEPT);
3838 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3839 hdev->io_capability = 0x03; /* No Input No Output */
3840 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3841 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3843 hdev->sniff_max_interval = 800;
3844 hdev->sniff_min_interval = 80;
3846 hdev->le_adv_channel_map = 0x07;
3847 hdev->le_scan_interval = 0x0060;
3848 hdev->le_scan_window = 0x0030;
3849 hdev->le_conn_min_interval = 0x0028;
3850 hdev->le_conn_max_interval = 0x0038;
3852 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3853 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3854 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3855 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3857 mutex_init(&hdev->lock);
3858 mutex_init(&hdev->req_lock);
3860 INIT_LIST_HEAD(&hdev->mgmt_pending);
3861 INIT_LIST_HEAD(&hdev->blacklist);
3862 INIT_LIST_HEAD(&hdev->uuids);
3863 INIT_LIST_HEAD(&hdev->link_keys);
3864 INIT_LIST_HEAD(&hdev->long_term_keys);
3865 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3866 INIT_LIST_HEAD(&hdev->remote_oob_data);
3867 INIT_LIST_HEAD(&hdev->le_white_list);
3868 INIT_LIST_HEAD(&hdev->le_conn_params);
3869 INIT_LIST_HEAD(&hdev->pend_le_conns);
3870 INIT_LIST_HEAD(&hdev->conn_hash.list);
3872 INIT_WORK(&hdev->rx_work, hci_rx_work);
3873 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3874 INIT_WORK(&hdev->tx_work, hci_tx_work);
3875 INIT_WORK(&hdev->power_on, hci_power_on);
3877 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3878 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3879 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3881 skb_queue_head_init(&hdev->rx_q);
3882 skb_queue_head_init(&hdev->cmd_q);
3883 skb_queue_head_init(&hdev->raw_q);
3885 init_waitqueue_head(&hdev->req_wait_q);
3887 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3889 hci_init_sysfs(hdev);
3890 discovery_init(hdev);
3892 return hdev;
3894 EXPORT_SYMBOL(hci_alloc_dev);
3896 /* Free HCI device */
3897 void hci_free_dev(struct hci_dev *hdev)
3899 /* will free via device release */
3900 put_device(&hdev->dev);
3902 EXPORT_SYMBOL(hci_free_dev);
3904 /* Register HCI device */
3905 int hci_register_dev(struct hci_dev *hdev)
3907 int id, error;
3909 if (!hdev->open || !hdev->close)
3910 return -EINVAL;
3912 /* Do not allow HCI_AMP devices to register at index 0,
3913 * so the index can be used as the AMP controller ID.
3915 switch (hdev->dev_type) {
3916 case HCI_BREDR:
3917 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3918 break;
3919 case HCI_AMP:
3920 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3921 break;
3922 default:
3923 return -EINVAL;
3926 if (id < 0)
3927 return id;
3929 sprintf(hdev->name, "hci%d", id);
3930 hdev->id = id;
3932 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3934 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3935 WQ_MEM_RECLAIM, 1, hdev->name);
3936 if (!hdev->workqueue) {
3937 error = -ENOMEM;
3938 goto err;
3941 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3942 WQ_MEM_RECLAIM, 1, hdev->name);
3943 if (!hdev->req_workqueue) {
3944 destroy_workqueue(hdev->workqueue);
3945 error = -ENOMEM;
3946 goto err;
3949 if (!IS_ERR_OR_NULL(bt_debugfs))
3950 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3952 dev_set_name(&hdev->dev, "%s", hdev->name);
3954 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3955 CRYPTO_ALG_ASYNC);
3956 if (IS_ERR(hdev->tfm_aes)) {
3957 BT_ERR("Unable to create crypto context");
3958 error = PTR_ERR(hdev->tfm_aes);
3959 hdev->tfm_aes = NULL;
3960 goto err_wqueue;
3963 error = device_add(&hdev->dev);
3964 if (error < 0)
3965 goto err_tfm;
3967 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3968 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3969 hdev);
3970 if (hdev->rfkill) {
3971 if (rfkill_register(hdev->rfkill) < 0) {
3972 rfkill_destroy(hdev->rfkill);
3973 hdev->rfkill = NULL;
3977 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3978 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3980 set_bit(HCI_SETUP, &hdev->dev_flags);
3981 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3983 if (hdev->dev_type == HCI_BREDR) {
3984 /* Assume BR/EDR support until proven otherwise (such as
3985 * through reading supported features during init).
3987 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3990 write_lock(&hci_dev_list_lock);
3991 list_add(&hdev->list, &hci_dev_list);
3992 write_unlock(&hci_dev_list_lock);
3994 hci_notify(hdev, HCI_DEV_REG);
3995 hci_dev_hold(hdev);
3997 queue_work(hdev->req_workqueue, &hdev->power_on);
3999 return id;
4001 err_tfm:
4002 crypto_free_blkcipher(hdev->tfm_aes);
4003 err_wqueue:
4004 destroy_workqueue(hdev->workqueue);
4005 destroy_workqueue(hdev->req_workqueue);
4006 err:
4007 ida_simple_remove(&hci_index_ida, hdev->id);
4009 return error;
4011 EXPORT_SYMBOL(hci_register_dev);
4013 /* Unregister HCI device */
4014 void hci_unregister_dev(struct hci_dev *hdev)
4016 int i, id;
4018 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4020 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4022 id = hdev->id;
4024 write_lock(&hci_dev_list_lock);
4025 list_del(&hdev->list);
4026 write_unlock(&hci_dev_list_lock);
4028 hci_dev_do_close(hdev);
4030 for (i = 0; i < NUM_REASSEMBLY; i++)
4031 kfree_skb(hdev->reassembly[i]);
4033 cancel_work_sync(&hdev->power_on);
4035 if (!test_bit(HCI_INIT, &hdev->flags) &&
4036 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4037 hci_dev_lock(hdev);
4038 mgmt_index_removed(hdev);
4039 hci_dev_unlock(hdev);
4042 /* mgmt_index_removed should take care of emptying the
4043 * pending list */
4044 BUG_ON(!list_empty(&hdev->mgmt_pending));
4046 hci_notify(hdev, HCI_DEV_UNREG);
4048 if (hdev->rfkill) {
4049 rfkill_unregister(hdev->rfkill);
4050 rfkill_destroy(hdev->rfkill);
4053 if (hdev->tfm_aes)
4054 crypto_free_blkcipher(hdev->tfm_aes);
4056 device_del(&hdev->dev);
4058 debugfs_remove_recursive(hdev->debugfs);
4060 destroy_workqueue(hdev->workqueue);
4061 destroy_workqueue(hdev->req_workqueue);
4063 hci_dev_lock(hdev);
4064 hci_blacklist_clear(hdev);
4065 hci_uuids_clear(hdev);
4066 hci_link_keys_clear(hdev);
4067 hci_smp_ltks_clear(hdev);
4068 hci_smp_irks_clear(hdev);
4069 hci_remote_oob_data_clear(hdev);
4070 hci_white_list_clear(hdev);
4071 hci_conn_params_clear(hdev);
4072 hci_pend_le_conns_clear(hdev);
4073 hci_dev_unlock(hdev);
4075 hci_dev_put(hdev);
4077 ida_simple_remove(&hci_index_ida, id);
4079 EXPORT_SYMBOL(hci_unregister_dev);
4081 /* Suspend HCI device */
4082 int hci_suspend_dev(struct hci_dev *hdev)
4084 hci_notify(hdev, HCI_DEV_SUSPEND);
4085 return 0;
4087 EXPORT_SYMBOL(hci_suspend_dev);
4089 /* Resume HCI device */
4090 int hci_resume_dev(struct hci_dev *hdev)
4092 hci_notify(hdev, HCI_DEV_RESUME);
4093 return 0;
4095 EXPORT_SYMBOL(hci_resume_dev);
4097 /* Receive frame from HCI drivers */
4098 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4100 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4101 && !test_bit(HCI_INIT, &hdev->flags))) {
4102 kfree_skb(skb);
4103 return -ENXIO;
4106 /* Incoming skb */
4107 bt_cb(skb)->incoming = 1;
4109 /* Time stamp */
4110 __net_timestamp(skb);
4112 skb_queue_tail(&hdev->rx_q, skb);
4113 queue_work(hdev->workqueue, &hdev->rx_work);
4115 return 0;
4117 EXPORT_SYMBOL(hci_recv_frame);
4119 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4120 int count, __u8 index)
4122 int len = 0;
4123 int hlen = 0;
4124 int remain = count;
4125 struct sk_buff *skb;
4126 struct bt_skb_cb *scb;
4128 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4129 index >= NUM_REASSEMBLY)
4130 return -EILSEQ;
4132 skb = hdev->reassembly[index];
4134 if (!skb) {
4135 switch (type) {
4136 case HCI_ACLDATA_PKT:
4137 len = HCI_MAX_FRAME_SIZE;
4138 hlen = HCI_ACL_HDR_SIZE;
4139 break;
4140 case HCI_EVENT_PKT:
4141 len = HCI_MAX_EVENT_SIZE;
4142 hlen = HCI_EVENT_HDR_SIZE;
4143 break;
4144 case HCI_SCODATA_PKT:
4145 len = HCI_MAX_SCO_SIZE;
4146 hlen = HCI_SCO_HDR_SIZE;
4147 break;
4150 skb = bt_skb_alloc(len, GFP_ATOMIC);
4151 if (!skb)
4152 return -ENOMEM;
4154 scb = (void *) skb->cb;
4155 scb->expect = hlen;
4156 scb->pkt_type = type;
4158 hdev->reassembly[index] = skb;
4161 while (count) {
4162 scb = (void *) skb->cb;
4163 len = min_t(uint, scb->expect, count);
4165 memcpy(skb_put(skb, len), data, len);
4167 count -= len;
4168 data += len;
4169 scb->expect -= len;
4170 remain = count;
4172 switch (type) {
4173 case HCI_EVENT_PKT:
4174 if (skb->len == HCI_EVENT_HDR_SIZE) {
4175 struct hci_event_hdr *h = hci_event_hdr(skb);
4176 scb->expect = h->plen;
4178 if (skb_tailroom(skb) < scb->expect) {
4179 kfree_skb(skb);
4180 hdev->reassembly[index] = NULL;
4181 return -ENOMEM;
4184 break;
4186 case HCI_ACLDATA_PKT:
4187 if (skb->len == HCI_ACL_HDR_SIZE) {
4188 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4189 scb->expect = __le16_to_cpu(h->dlen);
4191 if (skb_tailroom(skb) < scb->expect) {
4192 kfree_skb(skb);
4193 hdev->reassembly[index] = NULL;
4194 return -ENOMEM;
4197 break;
4199 case HCI_SCODATA_PKT:
4200 if (skb->len == HCI_SCO_HDR_SIZE) {
4201 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4202 scb->expect = h->dlen;
4204 if (skb_tailroom(skb) < scb->expect) {
4205 kfree_skb(skb);
4206 hdev->reassembly[index] = NULL;
4207 return -ENOMEM;
4210 break;
4213 if (scb->expect == 0) {
4214 /* Complete frame */
4216 bt_cb(skb)->pkt_type = type;
4217 hci_recv_frame(hdev, skb);
4219 hdev->reassembly[index] = NULL;
4220 return remain;
4224 return remain;
4227 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4229 int rem = 0;
4231 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4232 return -EILSEQ;
4234 while (count) {
4235 rem = hci_reassembly(hdev, type, data, count, type - 1);
4236 if (rem < 0)
4237 return rem;
4239 data += (count - rem);
4240 count = rem;
4243 return rem;
4245 EXPORT_SYMBOL(hci_recv_fragment);
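/* Illustrative sketch only (not part of the original source): a driver that
 * receives chunks already tagged with an HCI packet type could feed them to
 * the core like this. The function name is hypothetical.
 */
static int example_driver_rx(struct hci_dev *hdev, u8 pkt_type,
			     void *buf, int len)
{
	int rem = hci_recv_fragment(hdev, pkt_type, buf, len);

	if (rem < 0)
		BT_ERR("%s reassembly failed: %d", hdev->name, rem);

	return rem < 0 ? rem : 0;
}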
4247 #define STREAM_REASSEMBLY 0
4249 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4251 int type;
4252 int rem = 0;
4254 while (count) {
4255 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4257 if (!skb) {
4258 struct { char type; } *pkt;
4260 /* Start of the frame */
4261 pkt = data;
4262 type = pkt->type;
4264 data++;
4265 count--;
4266 } else
4267 type = bt_cb(skb)->pkt_type;
4269 rem = hci_reassembly(hdev, type, data, count,
4270 STREAM_REASSEMBLY);
4271 if (rem < 0)
4272 return rem;
4274 data += (count - rem);
4275 count = rem;
4278 return rem;
4280 EXPORT_SYMBOL(hci_recv_stream_fragment);
4282 /* ---- Interface to upper protocols ---- */
4284 int hci_register_cb(struct hci_cb *cb)
4286 BT_DBG("%p name %s", cb, cb->name);
4288 write_lock(&hci_cb_list_lock);
4289 list_add(&cb->list, &hci_cb_list);
4290 write_unlock(&hci_cb_list_lock);
4292 return 0;
4294 EXPORT_SYMBOL(hci_register_cb);
4296 int hci_unregister_cb(struct hci_cb *cb)
4298 BT_DBG("%p name %s", cb, cb->name);
4300 write_lock(&hci_cb_list_lock);
4301 list_del(&cb->list);
4302 write_unlock(&hci_cb_list_lock);
4304 return 0;
4306 EXPORT_SYMBOL(hci_unregister_cb);
4308 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4310 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4312 /* Time stamp */
4313 __net_timestamp(skb);
4315 /* Send copy to monitor */
4316 hci_send_to_monitor(hdev, skb);
4318 if (atomic_read(&hdev->promisc)) {
4319 /* Send copy to the sockets */
4320 hci_send_to_sock(hdev, skb);
4323 /* Get rid of skb owner, prior to sending to the driver. */
4324 skb_orphan(skb);
4326 if (hdev->send(hdev, skb) < 0)
4327 BT_ERR("%s sending frame failed", hdev->name);
4330 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4332 skb_queue_head_init(&req->cmd_q);
4333 req->hdev = hdev;
4334 req->err = 0;
4337 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4339 struct hci_dev *hdev = req->hdev;
4340 struct sk_buff *skb;
4341 unsigned long flags;
4343 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4345 /* If an error occurred during request building, remove all HCI
4346 * commands queued on the HCI request queue.
4348 if (req->err) {
4349 skb_queue_purge(&req->cmd_q);
4350 return req->err;
4353 /* Do not allow empty requests */
4354 if (skb_queue_empty(&req->cmd_q))
4355 return -ENODATA;
4357 skb = skb_peek_tail(&req->cmd_q);
4358 bt_cb(skb)->req.complete = complete;
4360 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4361 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4362 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4364 queue_work(hdev->workqueue, &hdev->cmd_work);
4366 return 0;
4369 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4370 u32 plen, const void *param)
4372 int len = HCI_COMMAND_HDR_SIZE + plen;
4373 struct hci_command_hdr *hdr;
4374 struct sk_buff *skb;
4376 skb = bt_skb_alloc(len, GFP_ATOMIC);
4377 if (!skb)
4378 return NULL;
4380 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4381 hdr->opcode = cpu_to_le16(opcode);
4382 hdr->plen = plen;
4384 if (plen)
4385 memcpy(skb_put(skb, plen), param, plen);
4387 BT_DBG("skb len %d", skb->len);
4389 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4391 return skb;
4394 /* Send HCI command */
4395 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4396 const void *param)
4398 struct sk_buff *skb;
4400 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4402 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4403 if (!skb) {
4404 BT_ERR("%s no memory for command", hdev->name);
4405 return -ENOMEM;
4408 /* Stand-alone HCI commands must be flagged as
4409 * single-command requests.
4411 bt_cb(skb)->req.start = true;
4413 skb_queue_tail(&hdev->cmd_q, skb);
4414 queue_work(hdev->workqueue, &hdev->cmd_work);
4416 return 0;
4419 /* Queue a command to an asynchronous HCI request */
4420 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4421 const void *param, u8 event)
4423 struct hci_dev *hdev = req->hdev;
4424 struct sk_buff *skb;
4426 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4428 /* If an error occurred during request building, there is no point in
4429 * queueing the HCI command. We can simply return.
4431 if (req->err)
4432 return;
4434 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4435 if (!skb) {
4436 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4437 hdev->name, opcode);
4438 req->err = -ENOMEM;
4439 return;
4442 if (skb_queue_empty(&req->cmd_q))
4443 bt_cb(skb)->req.start = true;
4445 bt_cb(skb)->req.event = event;
4447 skb_queue_tail(&req->cmd_q, skb);
4450 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4451 const void *param)
4453 hci_req_add_ev(req, opcode, plen, param, 0);
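/* Illustrative sketch only (not part of the original source): how a caller
 * typically builds and runs an asynchronous request with the helpers above.
 * The function and callback names are hypothetical.
 */
static void example_reset_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s reset complete, status 0x%2.2x", hdev->name, status);
}

static int example_send_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_reset_complete);
}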
4456 /* Get data from the previously sent command */
4457 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4459 struct hci_command_hdr *hdr;
4461 if (!hdev->sent_cmd)
4462 return NULL;
4464 hdr = (void *) hdev->sent_cmd->data;
4466 if (hdr->opcode != cpu_to_le16(opcode))
4467 return NULL;
4469 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4471 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4474 /* Send ACL data */
4475 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4477 struct hci_acl_hdr *hdr;
4478 int len = skb->len;
4480 skb_push(skb, HCI_ACL_HDR_SIZE);
4481 skb_reset_transport_header(skb);
4482 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4483 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4484 hdr->dlen = cpu_to_le16(len);
4487 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4488 struct sk_buff *skb, __u16 flags)
4490 struct hci_conn *conn = chan->conn;
4491 struct hci_dev *hdev = conn->hdev;
4492 struct sk_buff *list;
4494 skb->len = skb_headlen(skb);
4495 skb->data_len = 0;
4497 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4499 switch (hdev->dev_type) {
4500 case HCI_BREDR:
4501 hci_add_acl_hdr(skb, conn->handle, flags);
4502 break;
4503 case HCI_AMP:
4504 hci_add_acl_hdr(skb, chan->handle, flags);
4505 break;
4506 default:
4507 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4508 return;
4511 list = skb_shinfo(skb)->frag_list;
4512 if (!list) {
4513 /* Non-fragmented */
4514 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4516 skb_queue_tail(queue, skb);
4517 } else {
4518 /* Fragmented */
4519 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4521 skb_shinfo(skb)->frag_list = NULL;
4523 /* Queue all fragments atomically */
4524 spin_lock(&queue->lock);
4526 __skb_queue_tail(queue, skb);
4528 flags &= ~ACL_START;
4529 flags |= ACL_CONT;
4530 do {
4531 skb = list; list = list->next;
4533 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4534 hci_add_acl_hdr(skb, conn->handle, flags);
4536 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4538 __skb_queue_tail(queue, skb);
4539 } while (list);
4541 spin_unlock(&queue->lock);
4545 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4547 struct hci_dev *hdev = chan->conn->hdev;
4549 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4551 hci_queue_acl(chan, &chan->data_q, skb, flags);
4553 queue_work(hdev->workqueue, &hdev->tx_work);
4556 /* Send SCO data */
4557 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4559 struct hci_dev *hdev = conn->hdev;
4560 struct hci_sco_hdr hdr;
4562 BT_DBG("%s len %d", hdev->name, skb->len);
4564 hdr.handle = cpu_to_le16(conn->handle);
4565 hdr.dlen = skb->len;
4567 skb_push(skb, HCI_SCO_HDR_SIZE);
4568 skb_reset_transport_header(skb);
4569 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4571 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4573 skb_queue_tail(&conn->data_q, skb);
4574 queue_work(hdev->workqueue, &hdev->tx_work);
4577 /* ---- HCI TX task (outgoing data) ---- */
4579 /* HCI Connection scheduler */
4580 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4581 int *quote)
4583 struct hci_conn_hash *h = &hdev->conn_hash;
4584 struct hci_conn *conn = NULL, *c;
4585 unsigned int num = 0, min = ~0;
4587 /* We don't have to lock device here. Connections are always
4588 * added and removed with TX task disabled. */
4590 rcu_read_lock();
4592 list_for_each_entry_rcu(c, &h->list, list) {
4593 if (c->type != type || skb_queue_empty(&c->data_q))
4594 continue;
4596 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4597 continue;
4599 num++;
4601 if (c->sent < min) {
4602 min = c->sent;
4603 conn = c;
4606 if (hci_conn_num(hdev, type) == num)
4607 break;
4610 rcu_read_unlock();
4612 if (conn) {
4613 int cnt, q;
4615 switch (conn->type) {
4616 case ACL_LINK:
4617 cnt = hdev->acl_cnt;
4618 break;
4619 case SCO_LINK:
4620 case ESCO_LINK:
4621 cnt = hdev->sco_cnt;
4622 break;
4623 case LE_LINK:
4624 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4625 break;
4626 default:
4627 cnt = 0;
4628 BT_ERR("Unknown link type");
4631 q = cnt / num;
4632 *quote = q ? q : 1;
4633 } else
4634 *quote = 0;
4636 BT_DBG("conn %p quote %d", conn, *quote);
4637 return conn;
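/* Added commentary: hci_low_sent() picks the connection of the requested
 * type with the fewest packets in flight and derives a per-round quota
 * from the free controller credits. For example (numbers purely
 * illustrative), with hdev->acl_cnt == 9 credits and num == 4 eligible
 * ACL connections the quote becomes 9 / 4 == 2; when the division rounds
 * down to 0 it is clamped to 1 so the least-loaded connection still
 * makes progress.
 */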
4640 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4642 struct hci_conn_hash *h = &hdev->conn_hash;
4643 struct hci_conn *c;
4645 BT_ERR("%s link tx timeout", hdev->name);
4647 rcu_read_lock();
4649 /* Kill stalled connections */
4650 list_for_each_entry_rcu(c, &h->list, list) {
4651 if (c->type == type && c->sent) {
4652 BT_ERR("%s killing stalled connection %pMR",
4653 hdev->name, &c->dst);
4654 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4658 rcu_read_unlock();
4661 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4662 int *quote)
4664 struct hci_conn_hash *h = &hdev->conn_hash;
4665 struct hci_chan *chan = NULL;
4666 unsigned int num = 0, min = ~0, cur_prio = 0;
4667 struct hci_conn *conn;
4668 int cnt, q, conn_num = 0;
4670 BT_DBG("%s", hdev->name);
4672 rcu_read_lock();
4674 list_for_each_entry_rcu(conn, &h->list, list) {
4675 struct hci_chan *tmp;
4677 if (conn->type != type)
4678 continue;
4680 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4681 continue;
4683 conn_num++;
4685 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4686 struct sk_buff *skb;
4688 if (skb_queue_empty(&tmp->data_q))
4689 continue;
4691 skb = skb_peek(&tmp->data_q);
4692 if (skb->priority < cur_prio)
4693 continue;
4695 if (skb->priority > cur_prio) {
4696 num = 0;
4697 min = ~0;
4698 cur_prio = skb->priority;
4701 num++;
4703 if (conn->sent < min) {
4704 min = conn->sent;
4705 chan = tmp;
4709 if (hci_conn_num(hdev, type) == conn_num)
4710 break;
4713 rcu_read_unlock();
4715 if (!chan)
4716 return NULL;
4718 switch (chan->conn->type) {
4719 case ACL_LINK:
4720 cnt = hdev->acl_cnt;
4721 break;
4722 case AMP_LINK:
4723 cnt = hdev->block_cnt;
4724 break;
4725 case SCO_LINK:
4726 case ESCO_LINK:
4727 cnt = hdev->sco_cnt;
4728 break;
4729 case LE_LINK:
4730 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4731 break;
4732 default:
4733 cnt = 0;
4734 BT_ERR("Unknown link type");
4737 q = cnt / num;
4738 *quote = q ? q : 1;
4739 BT_DBG("chan %p quote %d", chan, *quote);
4740 return chan;
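/* Added commentary: hci_chan_sent() is the per-channel counterpart of
 * hci_low_sent(). It keys the selection on the priority of the skb at
 * the head of each channel queue: a higher priority restarts the search,
 * and among channels of equal priority the one whose connection has the
 * fewest packets in flight wins. The quota is derived from the same
 * controller credit pools as above.
 */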
4743 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4745 struct hci_conn_hash *h = &hdev->conn_hash;
4746 struct hci_conn *conn;
4747 int num = 0;
4749 BT_DBG("%s", hdev->name);
4751 rcu_read_lock();
4753 list_for_each_entry_rcu(conn, &h->list, list) {
4754 struct hci_chan *chan;
4756 if (conn->type != type)
4757 continue;
4759 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4760 continue;
4762 num++;
4764 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4765 struct sk_buff *skb;
4767 if (chan->sent) {
4768 chan->sent = 0;
4769 continue;
4772 if (skb_queue_empty(&chan->data_q))
4773 continue;
4775 skb = skb_peek(&chan->data_q);
4776 if (skb->priority >= HCI_PRIO_MAX - 1)
4777 continue;
4779 skb->priority = HCI_PRIO_MAX - 1;
4781 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4782 skb->priority);
4785 if (hci_conn_num(hdev, type) == num)
4786 break;
4789 rcu_read_unlock();
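/* Added commentary: after a scheduling round, hci_prio_recalculate()
 * clears the sent counter of every channel that was served and bumps the
 * head skb of every starved channel to HCI_PRIO_MAX - 1, so a channel
 * that keeps losing on priority is eventually promoted ahead of the
 * busier ones.
 */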
4793 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4795 /* Calculate count of blocks used by this packet */
4796 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
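/* Added commentary: under block-based flow control the controller
 * accounts for buffer blocks rather than packets. As an illustrative
 * example, with hdev->block_len == 64 a 150-byte ACL payload costs
 * DIV_ROUND_UP(150, 64) == 3 blocks against hdev->block_cnt.
 */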
4799 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4801 if (!test_bit(HCI_RAW, &hdev->flags)) {
4802 /* ACL tx timeout must be longer than maximum
4803 * link supervision timeout (40.9 seconds) */
4804 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4805 HCI_ACL_TX_TIMEOUT))
4806 hci_link_tx_to(hdev, ACL_LINK);
4810 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4812 unsigned int cnt = hdev->acl_cnt;
4813 struct hci_chan *chan;
4814 struct sk_buff *skb;
4815 int quote;
4817 __check_timeout(hdev, cnt);
4819 while (hdev->acl_cnt &&
4820 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4821 u32 priority = (skb_peek(&chan->data_q))->priority;
4822 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4823 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4824 skb->len, skb->priority);
4826 /* Stop if priority has changed */
4827 if (skb->priority < priority)
4828 break;
4830 skb = skb_dequeue(&chan->data_q);
4832 hci_conn_enter_active_mode(chan->conn,
4833 bt_cb(skb)->force_active);
4835 hci_send_frame(hdev, skb);
4836 hdev->acl_last_tx = jiffies;
4838 hdev->acl_cnt--;
4839 chan->sent++;
4840 chan->conn->sent++;
4844 if (cnt != hdev->acl_cnt)
4845 hci_prio_recalculate(hdev, ACL_LINK);
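/* Added commentary: packet-based ACL scheduling drains up to "quote"
 * packets from each selected channel, stops early if a lower-priority
 * skb reaches the head of the queue, and charges one hdev->acl_cnt
 * credit per frame handed to hci_send_frame(). If anything was sent, the
 * priorities are recalculated so the remaining channels are not starved.
 */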
4848 static void hci_sched_acl_blk(struct hci_dev *hdev)
4850 unsigned int cnt = hdev->block_cnt;
4851 struct hci_chan *chan;
4852 struct sk_buff *skb;
4853 int quote;
4854 u8 type;
4856 __check_timeout(hdev, cnt);
4858 BT_DBG("%s", hdev->name);
4860 if (hdev->dev_type == HCI_AMP)
4861 type = AMP_LINK;
4862 else
4863 type = ACL_LINK;
4865 while (hdev->block_cnt > 0 &&
4866 (chan = hci_chan_sent(hdev, type, &quote))) {
4867 u32 priority = (skb_peek(&chan->data_q))->priority;
4868 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4869 int blocks;
4871 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4872 skb->len, skb->priority);
4874 /* Stop if priority has changed */
4875 if (skb->priority < priority)
4876 break;
4878 skb = skb_dequeue(&chan->data_q);
4880 blocks = __get_blocks(hdev, skb);
4881 if (blocks > hdev->block_cnt)
4882 return;
4884 hci_conn_enter_active_mode(chan->conn,
4885 bt_cb(skb)->force_active);
4887 hci_send_frame(hdev, skb);
4888 hdev->acl_last_tx = jiffies;
4890 hdev->block_cnt -= blocks;
4891 quote -= blocks;
4893 chan->sent += blocks;
4894 chan->conn->sent += blocks;
4898 if (cnt != hdev->block_cnt)
4899 hci_prio_recalculate(hdev, type);
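/* Added commentary: the block-based variant mirrors the packet-based one
 * but decrements the quota and hdev->block_cnt by the number of buffer
 * blocks each frame occupies, and bails out entirely once a frame would
 * not fit in the remaining block credits. On an AMP controller the
 * channels are looked up under AMP_LINK instead of ACL_LINK.
 */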
4902 static void hci_sched_acl(struct hci_dev *hdev)
4904 BT_DBG("%s", hdev->name);
4906 /* No ACL link over BR/EDR controller */
4907 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4908 return;
4910 /* No AMP link over AMP controller */
4911 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4912 return;
4914 switch (hdev->flow_ctl_mode) {
4915 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4916 hci_sched_acl_pkt(hdev);
4917 break;
4919 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4920 hci_sched_acl_blk(hdev);
4921 break;
4925 /* Schedule SCO */
4926 static void hci_sched_sco(struct hci_dev *hdev)
4928 struct hci_conn *conn;
4929 struct sk_buff *skb;
4930 int quote;
4932 BT_DBG("%s", hdev->name);
4934 if (!hci_conn_num(hdev, SCO_LINK))
4935 return;
4937 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4938 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4939 BT_DBG("skb %p len %d", skb, skb->len);
4940 hci_send_frame(hdev, skb);
4942 conn->sent++;
4943 if (conn->sent == ~0)
4944 conn->sent = 0;
4949 static void hci_sched_esco(struct hci_dev *hdev)
4951 struct hci_conn *conn;
4952 struct sk_buff *skb;
4953 int quote;
4955 BT_DBG("%s", hdev->name);
4957 if (!hci_conn_num(hdev, ESCO_LINK))
4958 return;
4960 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4961 &quote))) {
4962 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4963 BT_DBG("skb %p len %d", skb, skb->len);
4964 hci_send_frame(hdev, skb);
4966 conn->sent++;
4967 if (conn->sent == ~0)
4968 conn->sent = 0;
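/* Added commentary: SCO and eSCO scheduling are identical apart from the
 * link type and both draw on the shared hdev->sco_cnt credit pool; the
 * per-connection sent counter simply wraps back to zero once it
 * saturates.
 */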
4973 static void hci_sched_le(struct hci_dev *hdev)
4975 struct hci_chan *chan;
4976 struct sk_buff *skb;
4977 int quote, cnt, tmp;
4979 BT_DBG("%s", hdev->name);
4981 if (!hci_conn_num(hdev, LE_LINK))
4982 return;
4984 if (!test_bit(HCI_RAW, &hdev->flags)) {
4985 /* LE tx timeout must be longer than maximum
4986 * link supervision timeout (40.9 seconds) */
4987 if (!hdev->le_cnt && hdev->le_pkts &&
4988 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4989 hci_link_tx_to(hdev, LE_LINK);
4992 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4993 tmp = cnt;
4994 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4995 u32 priority = (skb_peek(&chan->data_q))->priority;
4996 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4997 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4998 skb->len, skb->priority);
5000 /* Stop if priority has changed */
5001 if (skb->priority < priority)
5002 break;
5004 skb = skb_dequeue(&chan->data_q);
5006 hci_send_frame(hdev, skb);
5007 hdev->le_last_tx = jiffies;
5009 cnt--;
5010 chan->sent++;
5011 chan->conn->sent++;
5015 if (hdev->le_pkts)
5016 hdev->le_cnt = cnt;
5017 else
5018 hdev->acl_cnt = cnt;
5020 if (cnt != tmp)
5021 hci_prio_recalculate(hdev, LE_LINK);
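/* Added commentary: controllers without a dedicated LE buffer pool
 * (hdev->le_pkts == 0) share the ACL credits, which is why the loop
 * works on a local count and writes the remainder back to either
 * hdev->le_cnt or hdev->acl_cnt when it is done.
 */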
5024 static void hci_tx_work(struct work_struct *work)
5026 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5027 struct sk_buff *skb;
5029 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5030 hdev->sco_cnt, hdev->le_cnt);
5032 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5033 /* Schedule the queues and hand the frames to the HCI driver */
5034 hci_sched_acl(hdev);
5035 hci_sched_sco(hdev);
5036 hci_sched_esco(hdev);
5037 hci_sched_le(hdev);
5040 /* Send next queued raw (unknown type) packet */
5041 while ((skb = skb_dequeue(&hdev->raw_q)))
5042 hci_send_frame(hdev, skb);
5045 /* ----- HCI RX task (incoming data processing) ----- */
5047 /* ACL data packet */
5048 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5050 struct hci_acl_hdr *hdr = (void *) skb->data;
5051 struct hci_conn *conn;
5052 __u16 handle, flags;
5054 skb_pull(skb, HCI_ACL_HDR_SIZE);
5056 handle = __le16_to_cpu(hdr->handle);
5057 flags = hci_flags(handle);
5058 handle = hci_handle(handle);
5060 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5061 handle, flags);
5063 hdev->stat.acl_rx++;
5065 hci_dev_lock(hdev);
5066 conn = hci_conn_hash_lookup_handle(hdev, handle);
5067 hci_dev_unlock(hdev);
5069 if (conn) {
5070 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5072 /* Send to upper protocol */
5073 l2cap_recv_acldata(conn, skb, flags);
5074 return;
5075 } else {
5076 BT_ERR("%s ACL packet for unknown connection handle %d",
5077 hdev->name, handle);
5080 kfree_skb(skb);
5083 /* SCO data packet */
5084 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5086 struct hci_sco_hdr *hdr = (void *) skb->data;
5087 struct hci_conn *conn;
5088 __u16 handle;
5090 skb_pull(skb, HCI_SCO_HDR_SIZE);
5092 handle = __le16_to_cpu(hdr->handle);
5094 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5096 hdev->stat.sco_rx++;
5098 hci_dev_lock(hdev);
5099 conn = hci_conn_hash_lookup_handle(hdev, handle);
5100 hci_dev_unlock(hdev);
5102 if (conn) {
5103 /* Send to upper protocol */
5104 sco_recv_scodata(conn, skb);
5105 return;
5106 } else {
5107 BT_ERR("%s SCO packet for unknown connection handle %d",
5108 hdev->name, handle);
5111 kfree_skb(skb);
5114 static bool hci_req_is_complete(struct hci_dev *hdev)
5116 struct sk_buff *skb;
5118 skb = skb_peek(&hdev->cmd_q);
5119 if (!skb)
5120 return true;
5122 return bt_cb(skb)->req.start;
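/* Added commentary: commands queued through the request framework are
 * tagged in their control block; bt_cb(skb)->req.start marks the first
 * command of a request, so the current request has fully drained once
 * the head of hdev->cmd_q is either empty or a new start marker.
 */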
5125 static void hci_resend_last(struct hci_dev *hdev)
5127 struct hci_command_hdr *sent;
5128 struct sk_buff *skb;
5129 u16 opcode;
5131 if (!hdev->sent_cmd)
5132 return;
5134 sent = (void *) hdev->sent_cmd->data;
5135 opcode = __le16_to_cpu(sent->opcode);
5136 if (opcode == HCI_OP_RESET)
5137 return;
5139 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5140 if (!skb)
5141 return;
5143 skb_queue_head(&hdev->cmd_q, skb);
5144 queue_work(hdev->workqueue, &hdev->cmd_work);
5147 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5149 hci_req_complete_t req_complete = NULL;
5150 struct sk_buff *skb;
5151 unsigned long flags;
5153 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5155 /* If the completed command doesn't match the last one that was
5156 * sent we need to do special handling of it.
5158 if (!hci_sent_cmd_data(hdev, opcode)) {
5159 /* Some CSR based controllers generate a spontaneous
5160 * reset complete event during init and any pending
5161 * command will never be completed. In such a case we
5162 * need to resend whatever was the last sent
5163 * command.
5165 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5166 hci_resend_last(hdev);
5168 return;
5171 /* If the command succeeded and there are still more commands in
5172 * this request, the request is not yet complete.
5174 if (!status && !hci_req_is_complete(hdev))
5175 return;
5177 /* If this was the last command in a request, the complete
5178 * callback would be found in hdev->sent_cmd instead of the
5179 * command queue (hdev->cmd_q).
5181 if (hdev->sent_cmd) {
5182 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5184 if (req_complete) {
5185 /* We must set the complete callback to NULL to
5186 * avoid calling the callback more than once if
5187 * this function gets called again.
5189 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5191 goto call_complete;
5195 /* Remove all pending commands belonging to this request */
5196 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5197 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5198 if (bt_cb(skb)->req.start) {
5199 __skb_queue_head(&hdev->cmd_q, skb);
5200 break;
5203 req_complete = bt_cb(skb)->req.complete;
5204 kfree_skb(skb);
5206 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5208 call_complete:
5209 if (req_complete)
5210 req_complete(hdev, status);
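/* Illustrative sketch (hypothetical caller, not part of the original
 * file): the typical way a request reaches the completion handling above
 * is via hci_req_init()/hci_req_add()/hci_req_run() with one completion
 * callback covering the whole batch. example_req_complete() and
 * example_run_reset_req() are invented names used only for illustration.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s example request completed, status 0x%2.2x",
               hdev->name, status);
}

static int example_run_reset_req(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_RESET, 0, NULL);

        return hci_req_run(&req, example_req_complete);
}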
5213 static void hci_rx_work(struct work_struct *work)
5215 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5216 struct sk_buff *skb;
5218 BT_DBG("%s", hdev->name);
5220 while ((skb = skb_dequeue(&hdev->rx_q))) {
5221 /* Send copy to monitor */
5222 hci_send_to_monitor(hdev, skb);
5224 if (atomic_read(&hdev->promisc)) {
5225 /* Send copy to the sockets */
5226 hci_send_to_sock(hdev, skb);
5229 if (test_bit(HCI_RAW, &hdev->flags) ||
5230 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5231 kfree_skb(skb);
5232 continue;
5235 if (test_bit(HCI_INIT, &hdev->flags)) {
5236 /* Don't process data packets in this state. */
5237 switch (bt_cb(skb)->pkt_type) {
5238 case HCI_ACLDATA_PKT:
5239 case HCI_SCODATA_PKT:
5240 kfree_skb(skb);
5241 continue;
5245 /* Process frame */
5246 switch (bt_cb(skb)->pkt_type) {
5247 case HCI_EVENT_PKT:
5248 BT_DBG("%s Event packet", hdev->name);
5249 hci_event_packet(hdev, skb);
5250 break;
5252 case HCI_ACLDATA_PKT:
5253 BT_DBG("%s ACL data packet", hdev->name);
5254 hci_acldata_packet(hdev, skb);
5255 break;
5257 case HCI_SCODATA_PKT:
5258 BT_DBG("%s SCO data packet", hdev->name);
5259 hci_scodata_packet(hdev, skb);
5260 break;
5262 default:
5263 kfree_skb(skb);
5264 break;
5269 static void hci_cmd_work(struct work_struct *work)
5271 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5272 struct sk_buff *skb;
5274 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5275 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5277 /* Send queued commands */
5278 if (atomic_read(&hdev->cmd_cnt)) {
5279 skb = skb_dequeue(&hdev->cmd_q);
5280 if (!skb)
5281 return;
5283 kfree_skb(hdev->sent_cmd);
5285 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5286 if (hdev->sent_cmd) {
5287 atomic_dec(&hdev->cmd_cnt);
5288 hci_send_frame(hdev, skb);
5289 if (test_bit(HCI_RESET, &hdev->flags))
5290 del_timer(&hdev->cmd_timer);
5291 else
5292 mod_timer(&hdev->cmd_timer,
5293 jiffies + HCI_CMD_TIMEOUT);
5294 } else {
5295 skb_queue_head(&hdev->cmd_q, skb);
5296 queue_work(hdev->workqueue, &hdev->cmd_work);
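/* Added commentary: hdev->cmd_cnt implements command flow control. A
 * credit is consumed here when a frame is sent and replenished by the
 * event handling code once the controller acknowledges the command;
 * cmd_timer catches a controller that never answers, and it is
 * deliberately not re-armed while an HCI reset is in flight.
 */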
5301 void hci_req_add_le_scan_disable(struct hci_request *req)
5303 struct hci_cp_le_set_scan_enable cp;
5305 memset(&cp, 0, sizeof(cp));
5306 cp.enable = LE_SCAN_DISABLE;
5307 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5310 void hci_req_add_le_passive_scan(struct hci_request *req)
5312 struct hci_cp_le_set_scan_param param_cp;
5313 struct hci_cp_le_set_scan_enable enable_cp;
5314 struct hci_dev *hdev = req->hdev;
5315 u8 own_addr_type;
5317 /* Set require_privacy to true to avoid identification from
5318 * unknown peer devices. Since this is passive scanning, no
5319 * SCAN_REQ using the local identity should be sent. Mandating
5320 * privacy is just an extra precaution.
5322 if (hci_update_random_address(req, true, &own_addr_type))
5323 return;
5325 memset(&param_cp, 0, sizeof(param_cp));
5326 param_cp.type = LE_SCAN_PASSIVE;
5327 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5328 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5329 param_cp.own_address_type = own_addr_type;
5330 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5331 &param_cp);
5333 memset(&enable_cp, 0, sizeof(enable_cp));
5334 enable_cp.enable = LE_SCAN_ENABLE;
5335 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5336 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5337 &enable_cp);
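/* Added commentary: the passive scan parameters come straight from
 * hdev->le_scan_interval and hdev->le_scan_window (HCI units of
 * 0.625 ms), and the own address type is whatever
 * hci_update_random_address() selected with privacy mandated, as an
 * extra precaution on top of passive scanning not sending SCAN_REQ.
 */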
5340 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5342 if (status)
5343 BT_DBG("HCI request failed to update background scanning: "
5344 "status 0x%2.2x", status);
5347 /* This function controls the background scanning based on hdev->pend_le_conns
5348 * list. If there are pending LE connections, we start the background scanning,
5349 * otherwise we stop it.
5351 * This function requires that the caller holds hdev->lock.
5353 void hci_update_background_scan(struct hci_dev *hdev)
5355 struct hci_request req;
5356 struct hci_conn *conn;
5357 int err;
5359 hci_req_init(&req, hdev);
5361 if (list_empty(&hdev->pend_le_conns)) {
5362 /* If there are no pending LE connections, we should stop
5363 * the background scanning.
5366 /* If controller is not scanning we are done. */
5367 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5368 return;
5370 hci_req_add_le_scan_disable(&req);
5372 BT_DBG("%s stopping background scanning", hdev->name);
5373 } else {
5374 /* If there is at least one pending LE connection, we should
5375 * keep the background scan running.
5378 /* If controller is connecting, we should not start scanning
5379 * since some controllers are not able to scan and connect at
5380 * the same time.
5382 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5383 if (conn)
5384 return;
5386 /* If controller is currently scanning, we stop it to ensure we
5387 * don't miss any advertising (due to duplicates filter).
5389 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5390 hci_req_add_le_scan_disable(&req);
5392 hci_req_add_le_passive_scan(&req);
5394 BT_DBG("%s starting background scanning", hdev->name);
5397 err = hci_req_run(&req, update_background_scan_complete);
5398 if (err)
5399 BT_ERR("Failed to run HCI request: err %d", err);
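/* Added usage note: hci_update_background_scan() must be called with
 * hdev->lock held, typically right after hdev->pend_le_conns has been
 * modified, so the passive scan state always tracks whether there is a
 * pending LE connection left to be found.
 */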