net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "smp.h"
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ----- HCI requests ----- */
59 #define HCI_REQ_DONE 0
60 #define HCI_REQ_PEND 1
61 #define HCI_REQ_CANCELED 2
63 #define hci_req_lock(d) mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
68 static void hci_notify(struct hci_dev *hdev, int event)
70 hci_sock_dev_event(hdev, event);
73 /* ---- HCI debugfs entries ---- */
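/* These entries live under each controller's debugfs directory
 * (typically /sys/kernel/debug/bluetooth/hci<N>/). The dut_mode entry
 * below reports Y or N on read; writing Y sends the Enable Device
 * Under Test Mode command and writing N resets the controller to leave
 * DUT mode, e.g. "echo Y > .../hci0/dut_mode" from user space.
 */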
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
82 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108 return -EALREADY;
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
125 if (err < 0)
126 return err;
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
130 return count;
133 static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
143 u8 p;
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
163 return 0;
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
188 return 0;
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
203 static int uuids_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
210 u8 i, val[16];
212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
219 seq_printf(f, "%pUb\n", val);
221 hci_dev_unlock(hdev);
223 return 0;
226 static int uuids_open(struct inode *inode, struct file *file)
228 return single_open(file, uuids_show, inode->i_private);
231 static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
238 static int inquiry_cache_show(struct seq_file *f, void *p)
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
244 hci_dev_lock(hdev);
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
257 hci_dev_unlock(hdev);
259 return 0;
262 static int inquiry_cache_open(struct inode *inode, struct file *file)
264 return single_open(file, inquiry_cache_show, inode->i_private);
267 static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
274 static int link_keys_show(struct seq_file *f, void *ptr)
276 struct hci_dev *hdev = f->private;
277 struct link_key *key;
279 rcu_read_lock();
280 list_for_each_entry_rcu(key, &hdev->link_keys, list)
281 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
283 rcu_read_unlock();
285 return 0;
288 static int link_keys_open(struct inode *inode, struct file *file)
290 return single_open(file, link_keys_show, inode->i_private);
293 static const struct file_operations link_keys_fops = {
294 .open = link_keys_open,
295 .read = seq_read,
296 .llseek = seq_lseek,
297 .release = single_release,
300 static int dev_class_show(struct seq_file *f, void *ptr)
302 struct hci_dev *hdev = f->private;
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
309 return 0;
312 static int dev_class_open(struct inode *inode, struct file *file)
314 return single_open(file, dev_class_show, inode->i_private);
317 static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
324 static int voice_setting_get(void *data, u64 *val)
326 struct hci_dev *hdev = data;
328 hci_dev_lock(hdev);
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
332 return 0;
335 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
338 static int auto_accept_delay_set(void *data, u64 val)
340 struct hci_dev *hdev = data;
342 hci_dev_lock(hdev);
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
346 return 0;
349 static int auto_accept_delay_get(void *data, u64 *val)
351 struct hci_dev *hdev = data;
353 hci_dev_lock(hdev);
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
357 return 0;
360 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
363 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364 size_t count, loff_t *ppos)
366 struct hci_dev *hdev = file->private_data;
367 char buf[3];
369 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
370 buf[1] = '\n';
371 buf[2] = '\0';
372 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
375 static ssize_t force_sc_support_write(struct file *file,
376 const char __user *user_buf,
377 size_t count, loff_t *ppos)
379 struct hci_dev *hdev = file->private_data;
380 char buf[32];
381 size_t buf_size = min(count, (sizeof(buf)-1));
382 bool enable;
384 if (test_bit(HCI_UP, &hdev->flags))
385 return -EBUSY;
387 if (copy_from_user(buf, user_buf, buf_size))
388 return -EFAULT;
390 buf[buf_size] = '\0';
391 if (strtobool(buf, &enable))
392 return -EINVAL;
394 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
395 return -EALREADY;
397 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
399 return count;
402 static const struct file_operations force_sc_support_fops = {
403 .open = simple_open,
404 .read = force_sc_support_read,
405 .write = force_sc_support_write,
406 .llseek = default_llseek,
409 static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
410 size_t count, loff_t *ppos)
412 struct hci_dev *hdev = file->private_data;
413 char buf[3];
415 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
416 buf[1] = '\n';
417 buf[2] = '\0';
418 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
421 static ssize_t force_lesc_support_write(struct file *file,
422 const char __user *user_buf,
423 size_t count, loff_t *ppos)
425 struct hci_dev *hdev = file->private_data;
426 char buf[32];
427 size_t buf_size = min(count, (sizeof(buf)-1));
428 bool enable;
430 if (copy_from_user(buf, user_buf, buf_size))
431 return -EFAULT;
433 buf[buf_size] = '\0';
434 if (strtobool(buf, &enable))
435 return -EINVAL;
437 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
438 return -EALREADY;
440 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
442 return count;
445 static const struct file_operations force_lesc_support_fops = {
446 .open = simple_open,
447 .read = force_lesc_support_read,
448 .write = force_lesc_support_write,
449 .llseek = default_llseek,
452 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
453 size_t count, loff_t *ppos)
455 struct hci_dev *hdev = file->private_data;
456 char buf[3];
458 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
459 buf[1] = '\n';
460 buf[2] = '\0';
461 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
464 static const struct file_operations sc_only_mode_fops = {
465 .open = simple_open,
466 .read = sc_only_mode_read,
467 .llseek = default_llseek,
470 static int idle_timeout_set(void *data, u64 val)
472 struct hci_dev *hdev = data;
474 if (val != 0 && (val < 500 || val > 3600000))
475 return -EINVAL;
477 hci_dev_lock(hdev);
478 hdev->idle_timeout = val;
479 hci_dev_unlock(hdev);
481 return 0;
484 static int idle_timeout_get(void *data, u64 *val)
486 struct hci_dev *hdev = data;
488 hci_dev_lock(hdev);
489 *val = hdev->idle_timeout;
490 hci_dev_unlock(hdev);
492 return 0;
495 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
496 idle_timeout_set, "%llu\n");
498 static int rpa_timeout_set(void *data, u64 val)
500 struct hci_dev *hdev = data;
502 /* Require the RPA timeout to be at least 30 seconds and at most
503 * 24 hours.
505 if (val < 30 || val > (60 * 60 * 24))
506 return -EINVAL;
508 hci_dev_lock(hdev);
509 hdev->rpa_timeout = val;
510 hci_dev_unlock(hdev);
512 return 0;
515 static int rpa_timeout_get(void *data, u64 *val)
517 struct hci_dev *hdev = data;
519 hci_dev_lock(hdev);
520 *val = hdev->rpa_timeout;
521 hci_dev_unlock(hdev);
523 return 0;
526 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
527 rpa_timeout_set, "%llu\n");
529 static int sniff_min_interval_set(void *data, u64 val)
531 struct hci_dev *hdev = data;
533 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
534 return -EINVAL;
536 hci_dev_lock(hdev);
537 hdev->sniff_min_interval = val;
538 hci_dev_unlock(hdev);
540 return 0;
543 static int sniff_min_interval_get(void *data, u64 *val)
545 struct hci_dev *hdev = data;
547 hci_dev_lock(hdev);
548 *val = hdev->sniff_min_interval;
549 hci_dev_unlock(hdev);
551 return 0;
554 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
555 sniff_min_interval_set, "%llu\n");
557 static int sniff_max_interval_set(void *data, u64 val)
559 struct hci_dev *hdev = data;
561 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
562 return -EINVAL;
564 hci_dev_lock(hdev);
565 hdev->sniff_max_interval = val;
566 hci_dev_unlock(hdev);
568 return 0;
571 static int sniff_max_interval_get(void *data, u64 *val)
573 struct hci_dev *hdev = data;
575 hci_dev_lock(hdev);
576 *val = hdev->sniff_max_interval;
577 hci_dev_unlock(hdev);
579 return 0;
582 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
583 sniff_max_interval_set, "%llu\n");
585 static int conn_info_min_age_set(void *data, u64 val)
587 struct hci_dev *hdev = data;
589 if (val == 0 || val > hdev->conn_info_max_age)
590 return -EINVAL;
592 hci_dev_lock(hdev);
593 hdev->conn_info_min_age = val;
594 hci_dev_unlock(hdev);
596 return 0;
599 static int conn_info_min_age_get(void *data, u64 *val)
601 struct hci_dev *hdev = data;
603 hci_dev_lock(hdev);
604 *val = hdev->conn_info_min_age;
605 hci_dev_unlock(hdev);
607 return 0;
610 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
611 conn_info_min_age_set, "%llu\n");
613 static int conn_info_max_age_set(void *data, u64 val)
615 struct hci_dev *hdev = data;
617 if (val == 0 || val < hdev->conn_info_min_age)
618 return -EINVAL;
620 hci_dev_lock(hdev);
621 hdev->conn_info_max_age = val;
622 hci_dev_unlock(hdev);
624 return 0;
627 static int conn_info_max_age_get(void *data, u64 *val)
629 struct hci_dev *hdev = data;
631 hci_dev_lock(hdev);
632 *val = hdev->conn_info_max_age;
633 hci_dev_unlock(hdev);
635 return 0;
638 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
639 conn_info_max_age_set, "%llu\n");
641 static int identity_show(struct seq_file *f, void *p)
643 struct hci_dev *hdev = f->private;
644 bdaddr_t addr;
645 u8 addr_type;
647 hci_dev_lock(hdev);
649 hci_copy_identity_address(hdev, &addr, &addr_type);
651 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
652 16, hdev->irk, &hdev->rpa);
654 hci_dev_unlock(hdev);
656 return 0;
659 static int identity_open(struct inode *inode, struct file *file)
661 return single_open(file, identity_show, inode->i_private);
664 static const struct file_operations identity_fops = {
665 .open = identity_open,
666 .read = seq_read,
667 .llseek = seq_lseek,
668 .release = single_release,
671 static int random_address_show(struct seq_file *f, void *p)
673 struct hci_dev *hdev = f->private;
675 hci_dev_lock(hdev);
676 seq_printf(f, "%pMR\n", &hdev->random_addr);
677 hci_dev_unlock(hdev);
679 return 0;
682 static int random_address_open(struct inode *inode, struct file *file)
684 return single_open(file, random_address_show, inode->i_private);
687 static const struct file_operations random_address_fops = {
688 .open = random_address_open,
689 .read = seq_read,
690 .llseek = seq_lseek,
691 .release = single_release,
694 static int static_address_show(struct seq_file *f, void *p)
696 struct hci_dev *hdev = f->private;
698 hci_dev_lock(hdev);
699 seq_printf(f, "%pMR\n", &hdev->static_addr);
700 hci_dev_unlock(hdev);
702 return 0;
705 static int static_address_open(struct inode *inode, struct file *file)
707 return single_open(file, static_address_show, inode->i_private);
710 static const struct file_operations static_address_fops = {
711 .open = static_address_open,
712 .read = seq_read,
713 .llseek = seq_lseek,
714 .release = single_release,
717 static ssize_t force_static_address_read(struct file *file,
718 char __user *user_buf,
719 size_t count, loff_t *ppos)
721 struct hci_dev *hdev = file->private_data;
722 char buf[3];
724 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
725 buf[1] = '\n';
726 buf[2] = '\0';
727 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
730 static ssize_t force_static_address_write(struct file *file,
731 const char __user *user_buf,
732 size_t count, loff_t *ppos)
734 struct hci_dev *hdev = file->private_data;
735 char buf[32];
736 size_t buf_size = min(count, (sizeof(buf)-1));
737 bool enable;
739 if (test_bit(HCI_UP, &hdev->flags))
740 return -EBUSY;
742 if (copy_from_user(buf, user_buf, buf_size))
743 return -EFAULT;
745 buf[buf_size] = '\0';
746 if (strtobool(buf, &enable))
747 return -EINVAL;
749 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
750 return -EALREADY;
752 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
754 return count;
757 static const struct file_operations force_static_address_fops = {
758 .open = simple_open,
759 .read = force_static_address_read,
760 .write = force_static_address_write,
761 .llseek = default_llseek,
764 static int white_list_show(struct seq_file *f, void *ptr)
766 struct hci_dev *hdev = f->private;
767 struct bdaddr_list *b;
769 hci_dev_lock(hdev);
770 list_for_each_entry(b, &hdev->le_white_list, list)
771 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
772 hci_dev_unlock(hdev);
774 return 0;
777 static int white_list_open(struct inode *inode, struct file *file)
779 return single_open(file, white_list_show, inode->i_private);
782 static const struct file_operations white_list_fops = {
783 .open = white_list_open,
784 .read = seq_read,
785 .llseek = seq_lseek,
786 .release = single_release,
789 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
791 struct hci_dev *hdev = f->private;
792 struct smp_irk *irk;
794 rcu_read_lock();
795 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
796 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
797 &irk->bdaddr, irk->addr_type,
798 16, irk->val, &irk->rpa);
800 rcu_read_unlock();
802 return 0;
805 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
807 return single_open(file, identity_resolving_keys_show,
808 inode->i_private);
811 static const struct file_operations identity_resolving_keys_fops = {
812 .open = identity_resolving_keys_open,
813 .read = seq_read,
814 .llseek = seq_lseek,
815 .release = single_release,
818 static int long_term_keys_show(struct seq_file *f, void *ptr)
820 struct hci_dev *hdev = f->private;
821 struct smp_ltk *ltk;
823 rcu_read_lock();
824 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
828 __le64_to_cpu(ltk->rand), 16, ltk->val);
829 rcu_read_unlock();
831 return 0;
834 static int long_term_keys_open(struct inode *inode, struct file *file)
836 return single_open(file, long_term_keys_show, inode->i_private);
839 static const struct file_operations long_term_keys_fops = {
840 .open = long_term_keys_open,
841 .read = seq_read,
842 .llseek = seq_lseek,
843 .release = single_release,
846 static int conn_min_interval_set(void *data, u64 val)
848 struct hci_dev *hdev = data;
850 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
851 return -EINVAL;
853 hci_dev_lock(hdev);
854 hdev->le_conn_min_interval = val;
855 hci_dev_unlock(hdev);
857 return 0;
860 static int conn_min_interval_get(void *data, u64 *val)
862 struct hci_dev *hdev = data;
864 hci_dev_lock(hdev);
865 *val = hdev->le_conn_min_interval;
866 hci_dev_unlock(hdev);
868 return 0;
871 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
872 conn_min_interval_set, "%llu\n");
874 static int conn_max_interval_set(void *data, u64 val)
876 struct hci_dev *hdev = data;
878 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
879 return -EINVAL;
881 hci_dev_lock(hdev);
882 hdev->le_conn_max_interval = val;
883 hci_dev_unlock(hdev);
885 return 0;
888 static int conn_max_interval_get(void *data, u64 *val)
890 struct hci_dev *hdev = data;
892 hci_dev_lock(hdev);
893 *val = hdev->le_conn_max_interval;
894 hci_dev_unlock(hdev);
896 return 0;
899 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
900 conn_max_interval_set, "%llu\n");
902 static int conn_latency_set(void *data, u64 val)
904 struct hci_dev *hdev = data;
906 if (val > 0x01f3)
907 return -EINVAL;
909 hci_dev_lock(hdev);
910 hdev->le_conn_latency = val;
911 hci_dev_unlock(hdev);
913 return 0;
916 static int conn_latency_get(void *data, u64 *val)
918 struct hci_dev *hdev = data;
920 hci_dev_lock(hdev);
921 *val = hdev->le_conn_latency;
922 hci_dev_unlock(hdev);
924 return 0;
927 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
928 conn_latency_set, "%llu\n");
930 static int supervision_timeout_set(void *data, u64 val)
932 struct hci_dev *hdev = data;
934 if (val < 0x000a || val > 0x0c80)
935 return -EINVAL;
937 hci_dev_lock(hdev);
938 hdev->le_supv_timeout = val;
939 hci_dev_unlock(hdev);
941 return 0;
944 static int supervision_timeout_get(void *data, u64 *val)
946 struct hci_dev *hdev = data;
948 hci_dev_lock(hdev);
949 *val = hdev->le_supv_timeout;
950 hci_dev_unlock(hdev);
952 return 0;
955 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
956 supervision_timeout_set, "%llu\n");
958 static int adv_channel_map_set(void *data, u64 val)
960 struct hci_dev *hdev = data;
962 if (val < 0x01 || val > 0x07)
963 return -EINVAL;
965 hci_dev_lock(hdev);
966 hdev->le_adv_channel_map = val;
967 hci_dev_unlock(hdev);
969 return 0;
972 static int adv_channel_map_get(void *data, u64 *val)
974 struct hci_dev *hdev = data;
976 hci_dev_lock(hdev);
977 *val = hdev->le_adv_channel_map;
978 hci_dev_unlock(hdev);
980 return 0;
983 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
984 adv_channel_map_set, "%llu\n");
986 static int adv_min_interval_set(void *data, u64 val)
988 struct hci_dev *hdev = data;
990 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
991 return -EINVAL;
993 hci_dev_lock(hdev);
994 hdev->le_adv_min_interval = val;
995 hci_dev_unlock(hdev);
997 return 0;
1000 static int adv_min_interval_get(void *data, u64 *val)
1002 struct hci_dev *hdev = data;
1004 hci_dev_lock(hdev);
1005 *val = hdev->le_adv_min_interval;
1006 hci_dev_unlock(hdev);
1008 return 0;
1011 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1012 adv_min_interval_set, "%llu\n");
1014 static int adv_max_interval_set(void *data, u64 val)
1016 struct hci_dev *hdev = data;
1018 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1019 return -EINVAL;
1021 hci_dev_lock(hdev);
1022 hdev->le_adv_max_interval = val;
1023 hci_dev_unlock(hdev);
1025 return 0;
1028 static int adv_max_interval_get(void *data, u64 *val)
1030 struct hci_dev *hdev = data;
1032 hci_dev_lock(hdev);
1033 *val = hdev->le_adv_max_interval;
1034 hci_dev_unlock(hdev);
1036 return 0;
1039 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1040 adv_max_interval_set, "%llu\n");
1042 static int device_list_show(struct seq_file *f, void *ptr)
1044 struct hci_dev *hdev = f->private;
1045 struct hci_conn_params *p;
1046 struct bdaddr_list *b;
1048 hci_dev_lock(hdev);
1049 list_for_each_entry(b, &hdev->whitelist, list)
1050 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
1051 list_for_each_entry(p, &hdev->le_conn_params, list) {
1052 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
1053 p->auto_connect);
1055 hci_dev_unlock(hdev);
1057 return 0;
1060 static int device_list_open(struct inode *inode, struct file *file)
1062 return single_open(file, device_list_show, inode->i_private);
1065 static const struct file_operations device_list_fops = {
1066 .open = device_list_open,
1067 .read = seq_read,
1068 .llseek = seq_lseek,
1069 .release = single_release,
1072 /* ---- HCI requests ---- */
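/* Synchronous requests park the caller on hdev->req_wait_q and track
 * progress in hdev->req_status/req_result. hci_req_sync_complete() is
 * the completion callback that records the result and wakes the waiter.
 */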
1074 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1076 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1078 if (hdev->req_status == HCI_REQ_PEND) {
1079 hdev->req_result = result;
1080 hdev->req_status = HCI_REQ_DONE;
1081 wake_up_interruptible(&hdev->req_wait_q);
1085 static void hci_req_cancel(struct hci_dev *hdev, int err)
1087 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1089 if (hdev->req_status == HCI_REQ_PEND) {
1090 hdev->req_result = err;
1091 hdev->req_status = HCI_REQ_CANCELED;
1092 wake_up_interruptible(&hdev->req_wait_q);
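/* Pick up the event that completed the last synchronous command. The
 * skb is returned with its event header stripped if it matches the
 * requested event or carries the Command Complete for @opcode;
 * otherwise it is freed and -ENODATA is returned.
 */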
1096 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1097 u8 event)
1099 struct hci_ev_cmd_complete *ev;
1100 struct hci_event_hdr *hdr;
1101 struct sk_buff *skb;
1103 hci_dev_lock(hdev);
1105 skb = hdev->recv_evt;
1106 hdev->recv_evt = NULL;
1108 hci_dev_unlock(hdev);
1110 if (!skb)
1111 return ERR_PTR(-ENODATA);
1113 if (skb->len < sizeof(*hdr)) {
1114 BT_ERR("Too short HCI event");
1115 goto failed;
1118 hdr = (void *) skb->data;
1119 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1121 if (event) {
1122 if (hdr->evt != event)
1123 goto failed;
1124 return skb;
1127 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1128 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1129 goto failed;
1132 if (skb->len < sizeof(*ev)) {
1133 BT_ERR("Too short cmd_complete event");
1134 goto failed;
1137 ev = (void *) skb->data;
1138 skb_pull(skb, sizeof(*ev));
1140 if (opcode == __le16_to_cpu(ev->opcode))
1141 return skb;
1143 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1144 __le16_to_cpu(ev->opcode));
1146 failed:
1147 kfree_skb(skb);
1148 return ERR_PTR(-ENODATA);
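/* Send a single HCI command and block until the expected event (or the
 * Command Complete for @opcode) arrives or @timeout expires. Callers
 * serialize these helpers through hci_req_lock(), as dut_mode_write()
 * above does. Returns the response skb or an ERR_PTR on failure.
 */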
1151 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1152 const void *param, u8 event, u32 timeout)
1154 DECLARE_WAITQUEUE(wait, current);
1155 struct hci_request req;
1156 int err = 0;
1158 BT_DBG("%s", hdev->name);
1160 hci_req_init(&req, hdev);
1162 hci_req_add_ev(&req, opcode, plen, param, event);
1164 hdev->req_status = HCI_REQ_PEND;
1166 add_wait_queue(&hdev->req_wait_q, &wait);
1167 set_current_state(TASK_INTERRUPTIBLE);
1169 err = hci_req_run(&req, hci_req_sync_complete);
1170 if (err < 0) {
1171 remove_wait_queue(&hdev->req_wait_q, &wait);
1172 set_current_state(TASK_RUNNING);
1173 return ERR_PTR(err);
1176 schedule_timeout(timeout);
1178 remove_wait_queue(&hdev->req_wait_q, &wait);
1180 if (signal_pending(current))
1181 return ERR_PTR(-EINTR);
1183 switch (hdev->req_status) {
1184 case HCI_REQ_DONE:
1185 err = -bt_to_errno(hdev->req_result);
1186 break;
1188 case HCI_REQ_CANCELED:
1189 err = -hdev->req_result;
1190 break;
1192 default:
1193 err = -ETIMEDOUT;
1194 break;
1197 hdev->req_status = hdev->req_result = 0;
1199 BT_DBG("%s end: err %d", hdev->name, err);
1201 if (err < 0)
1202 return ERR_PTR(err);
1204 return hci_get_cmd_complete(hdev, opcode, event);
1206 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1208 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1209 const void *param, u32 timeout)
1211 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1213 EXPORT_SYMBOL(__hci_cmd_sync);
1215 /* Execute request and wait for completion. */
1216 static int __hci_req_sync(struct hci_dev *hdev,
1217 void (*func)(struct hci_request *req,
1218 unsigned long opt),
1219 unsigned long opt, __u32 timeout)
1221 struct hci_request req;
1222 DECLARE_WAITQUEUE(wait, current);
1223 int err = 0;
1225 BT_DBG("%s start", hdev->name);
1227 hci_req_init(&req, hdev);
1229 hdev->req_status = HCI_REQ_PEND;
1231 func(&req, opt);
1233 add_wait_queue(&hdev->req_wait_q, &wait);
1234 set_current_state(TASK_INTERRUPTIBLE);
1236 err = hci_req_run(&req, hci_req_sync_complete);
1237 if (err < 0) {
1238 hdev->req_status = 0;
1240 remove_wait_queue(&hdev->req_wait_q, &wait);
1241 set_current_state(TASK_RUNNING);
1243 /* ENODATA means the HCI request command queue is empty.
1244 * This can happen when a request with conditionals doesn't
1245 * trigger any commands to be sent. This is normal behavior
1246 * and should not trigger an error return.
1248 if (err == -ENODATA)
1249 return 0;
1251 return err;
1254 schedule_timeout(timeout);
1256 remove_wait_queue(&hdev->req_wait_q, &wait);
1258 if (signal_pending(current))
1259 return -EINTR;
1261 switch (hdev->req_status) {
1262 case HCI_REQ_DONE:
1263 err = -bt_to_errno(hdev->req_result);
1264 break;
1266 case HCI_REQ_CANCELED:
1267 err = -hdev->req_result;
1268 break;
1270 default:
1271 err = -ETIMEDOUT;
1272 break;
1275 hdev->req_status = hdev->req_result = 0;
1277 BT_DBG("%s end: err %d", hdev->name, err);
1279 return err;
1282 static int hci_req_sync(struct hci_dev *hdev,
1283 void (*req)(struct hci_request *req,
1284 unsigned long opt),
1285 unsigned long opt, __u32 timeout)
1287 int ret;
1289 if (!test_bit(HCI_UP, &hdev->flags))
1290 return -ENETDOWN;
1292 /* Serialize all requests */
1293 hci_req_lock(hdev);
1294 ret = __hci_req_sync(hdev, req, opt, timeout);
1295 hci_req_unlock(hdev);
1297 return ret;
1300 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1302 BT_DBG("%s %ld", req->hdev->name, opt);
1304 /* Reset device */
1305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1309 static void bredr_init(struct hci_request *req)
1311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1313 /* Read Local Supported Features */
1314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1316 /* Read Local Version */
1317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1319 /* Read BD Address */
1320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1323 static void amp_init(struct hci_request *req)
1325 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1327 /* Read Local Version */
1328 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1330 /* Read Local Supported Commands */
1331 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1333 /* Read Local Supported Features */
1334 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1336 /* Read Local AMP Info */
1337 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1339 /* Read Data Blk size */
1340 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1342 /* Read Flow Control Mode */
1343 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1345 /* Read Location Data */
1346 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1349 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1351 struct hci_dev *hdev = req->hdev;
1353 BT_DBG("%s %ld", hdev->name, opt);
1355 /* Reset */
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1357 hci_reset_req(req, 0);
1359 switch (hdev->dev_type) {
1360 case HCI_BREDR:
1361 bredr_init(req);
1362 break;
1364 case HCI_AMP:
1365 amp_init(req);
1366 break;
1368 default:
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1370 break;
1374 static void bredr_setup(struct hci_request *req)
1376 __le16 param;
1377 __u8 flt_type;
1379 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1380 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1382 /* Read Class of Device */
1383 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1385 /* Read Local Name */
1386 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1388 /* Read Voice Setting */
1389 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1391 /* Read Number of Supported IAC */
1392 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1394 /* Read Current IAC LAP */
1395 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1397 /* Clear Event Filters */
1398 flt_type = HCI_FLT_CLEAR_ALL;
1399 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1401 /* Connection accept timeout ~20 secs */
1402 param = cpu_to_le16(0x7d00);
1403 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1406 static void le_setup(struct hci_request *req)
1408 struct hci_dev *hdev = req->hdev;
1410 /* Read LE Buffer Size */
1411 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1413 /* Read LE Local Supported Features */
1414 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1416 /* Read LE Supported States */
1417 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1419 /* Read LE White List Size */
1420 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1422 /* Clear LE White List */
1423 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1425 /* LE-only controllers have LE implicitly enabled */
1426 if (!lmp_bredr_capable(hdev))
1427 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
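/* Choose the inquiry mode to program: 0x02 = inquiry with extended
 * result, 0x01 = inquiry with RSSI, 0x00 = standard. The explicit
 * manufacturer/revision checks appear to cover controllers that
 * support RSSI reporting without advertising it in their feature bits.
 */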
1430 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1432 if (lmp_ext_inq_capable(hdev))
1433 return 0x02;
1435 if (lmp_inq_rssi_capable(hdev))
1436 return 0x01;
1438 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1439 hdev->lmp_subver == 0x0757)
1440 return 0x01;
1442 if (hdev->manufacturer == 15) {
1443 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1444 return 0x01;
1445 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1446 return 0x01;
1447 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1448 return 0x01;
1451 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1452 hdev->lmp_subver == 0x1805)
1453 return 0x01;
1455 return 0x00;
1458 static void hci_setup_inquiry_mode(struct hci_request *req)
1460 u8 mode;
1462 mode = hci_get_inquiry_mode(req->hdev);
1464 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1467 static void hci_setup_event_mask(struct hci_request *req)
1469 struct hci_dev *hdev = req->hdev;
1471 /* The second byte is 0xff instead of 0x9f (two reserved bits
1472 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1473 * command otherwise.
1475 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1477 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
1478 * any event mask for pre 1.2 devices.
1480 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1481 return;
1483 if (lmp_bredr_capable(hdev)) {
1484 events[4] |= 0x01; /* Flow Specification Complete */
1485 events[4] |= 0x02; /* Inquiry Result with RSSI */
1486 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1487 events[5] |= 0x08; /* Synchronous Connection Complete */
1488 events[5] |= 0x10; /* Synchronous Connection Changed */
1489 } else {
1490 /* Use a different default for LE-only devices */
1491 memset(events, 0, sizeof(events));
1492 events[0] |= 0x10; /* Disconnection Complete */
1493 events[1] |= 0x08; /* Read Remote Version Information Complete */
1494 events[1] |= 0x20; /* Command Complete */
1495 events[1] |= 0x40; /* Command Status */
1496 events[1] |= 0x80; /* Hardware Error */
1497 events[2] |= 0x04; /* Number of Completed Packets */
1498 events[3] |= 0x02; /* Data Buffer Overflow */
1500 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1501 events[0] |= 0x80; /* Encryption Change */
1502 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1506 if (lmp_inq_rssi_capable(hdev))
1507 events[4] |= 0x02; /* Inquiry Result with RSSI */
1509 if (lmp_sniffsubr_capable(hdev))
1510 events[5] |= 0x20; /* Sniff Subrating */
1512 if (lmp_pause_enc_capable(hdev))
1513 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1515 if (lmp_ext_inq_capable(hdev))
1516 events[5] |= 0x40; /* Extended Inquiry Result */
1518 if (lmp_no_flush_capable(hdev))
1519 events[7] |= 0x01; /* Enhanced Flush Complete */
1521 if (lmp_lsto_capable(hdev))
1522 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1524 if (lmp_ssp_capable(hdev)) {
1525 events[6] |= 0x01; /* IO Capability Request */
1526 events[6] |= 0x02; /* IO Capability Response */
1527 events[6] |= 0x04; /* User Confirmation Request */
1528 events[6] |= 0x08; /* User Passkey Request */
1529 events[6] |= 0x10; /* Remote OOB Data Request */
1530 events[6] |= 0x20; /* Simple Pairing Complete */
1531 events[7] |= 0x04; /* User Passkey Notification */
1532 events[7] |= 0x08; /* Keypress Notification */
1533 events[7] |= 0x10; /* Remote Host Supported
1534 * Features Notification
1538 if (lmp_le_capable(hdev))
1539 events[7] |= 0x20; /* LE Meta-Event */
1541 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
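/* Stage 2 init: per-transport setup (BR/EDR and/or LE) plus SSP, EIR,
 * inquiry mode and authentication configuration based on the features
 * read during stage 1.
 */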
1544 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1546 struct hci_dev *hdev = req->hdev;
1548 if (lmp_bredr_capable(hdev))
1549 bredr_setup(req);
1550 else
1551 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1553 if (lmp_le_capable(hdev))
1554 le_setup(req);
1556 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1557 * local supported commands HCI command.
1559 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1560 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1562 if (lmp_ssp_capable(hdev)) {
1563 /* When SSP is available, the host features page should
1564 * be available as well. However, some controllers list
1565 * the max_page as 0 as long as SSP has not been enabled.
1566 * To achieve proper debugging output, force the minimum
1567 * max_page to 1 at least.
1569 hdev->max_page = 0x01;
1571 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1572 u8 mode = 0x01;
1573 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1574 sizeof(mode), &mode);
1575 } else {
1576 struct hci_cp_write_eir cp;
1578 memset(hdev->eir, 0, sizeof(hdev->eir));
1579 memset(&cp, 0, sizeof(cp));
1581 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1585 if (lmp_inq_rssi_capable(hdev))
1586 hci_setup_inquiry_mode(req);
1588 if (lmp_inq_tx_pwr_capable(hdev))
1589 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1591 if (lmp_ext_feat_capable(hdev)) {
1592 struct hci_cp_read_local_ext_features cp;
1594 cp.page = 0x01;
1595 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1596 sizeof(cp), &cp);
1599 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1600 u8 enable = 1;
1601 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1602 &enable);
1606 static void hci_setup_link_policy(struct hci_request *req)
1608 struct hci_dev *hdev = req->hdev;
1609 struct hci_cp_write_def_link_policy cp;
1610 u16 link_policy = 0;
1612 if (lmp_rswitch_capable(hdev))
1613 link_policy |= HCI_LP_RSWITCH;
1614 if (lmp_hold_capable(hdev))
1615 link_policy |= HCI_LP_HOLD;
1616 if (lmp_sniff_capable(hdev))
1617 link_policy |= HCI_LP_SNIFF;
1618 if (lmp_park_capable(hdev))
1619 link_policy |= HCI_LP_PARK;
1621 cp.policy = cpu_to_le16(link_policy);
1622 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1625 static void hci_set_le_support(struct hci_request *req)
1627 struct hci_dev *hdev = req->hdev;
1628 struct hci_cp_write_le_host_supported cp;
1630 /* LE-only devices do not support explicit enablement */
1631 if (!lmp_bredr_capable(hdev))
1632 return;
1634 memset(&cp, 0, sizeof(cp));
1636 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1637 cp.le = 0x01;
1638 cp.simul = 0x00;
1641 if (cp.le != lmp_host_le_capable(hdev))
1642 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1643 &cp);
1646 static void hci_set_event_mask_page_2(struct hci_request *req)
1648 struct hci_dev *hdev = req->hdev;
1649 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1651 /* If Connectionless Slave Broadcast master role is supported,
1652 * enable all necessary events for it.
1654 if (lmp_csb_master_capable(hdev)) {
1655 events[1] |= 0x40; /* Triggered Clock Capture */
1656 events[1] |= 0x80; /* Synchronization Train Complete */
1657 events[2] |= 0x10; /* Slave Page Response Timeout */
1658 events[2] |= 0x20; /* CSB Channel Map Change */
1661 /* If Connectionless Slave Broadcast slave role is supported,
1662 * enable all necessary events for it.
1664 if (lmp_csb_slave_capable(hdev)) {
1665 events[2] |= 0x01; /* Synchronization Train Received */
1666 events[2] |= 0x02; /* CSB Receive */
1667 events[2] |= 0x04; /* CSB Timeout */
1668 events[2] |= 0x08; /* Truncated Page Complete */
1671 /* Enable Authenticated Payload Timeout Expired event if supported */
1672 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1673 events[2] |= 0x80;
1675 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1678 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1680 struct hci_dev *hdev = req->hdev;
1681 u8 p;
1683 hci_setup_event_mask(req);
1685 /* Some Broadcom based Bluetooth controllers do not support the
1686 * Delete Stored Link Key command. They are clearly indicating its
1687 * absence in the bit mask of supported commands.
1689 * Check the supported commands and only if the command is marked
1690 * as supported send it. If not supported assume that the controller
1691 * does not have actual support for stored link keys which makes this
1692 * command redundant anyway.
1694 * Some controllers indicate that they can handle deleting
1695 * stored link keys, but they actually don't. The quirk lets
1696 * a driver simply disable this command.
1698 if (hdev->commands[6] & 0x80 &&
1699 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1700 struct hci_cp_delete_stored_link_key cp;
1702 bacpy(&cp.bdaddr, BDADDR_ANY);
1703 cp.delete_all = 0x01;
1704 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1705 sizeof(cp), &cp);
1708 if (hdev->commands[5] & 0x10)
1709 hci_setup_link_policy(req);
1711 if (hdev->commands[8] & 0x01)
1712 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1714 /* Some older Broadcom based Bluetooth 1.2 controllers do not
1715 * support the Read Page Scan Type command. Check support for
1716 * this command in the bit mask of supported commands.
1718 if (hdev->commands[13] & 0x01)
1719 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1721 if (lmp_le_capable(hdev)) {
1722 u8 events[8];
1724 memset(events, 0, sizeof(events));
1725 events[0] = 0x0f;
1727 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728 events[0] |= 0x10; /* LE Long Term Key Request */
1730 /* If controller supports the Connection Parameters Request
1731 * Link Layer Procedure, enable the corresponding event.
1733 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734 events[0] |= 0x20; /* LE Remote Connection
1735 * Parameter Request
1738 /* If the controller supports Extended Scanner Filter
1739 * Policies, enable the corresponding event.
1741 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1742 events[1] |= 0x04; /* LE Direct Advertising
1743 * Report
1746 /* If the controller supports the LE Read Local P-256
1747 * Public Key command, enable the corresponding event.
1749 if (hdev->commands[34] & 0x02)
1750 events[0] |= 0x80; /* LE Read Local P-256
1751 * Public Key Complete
1754 /* If the controller supports the LE Generate DHKey
1755 * command, enable the corresponding event.
1757 if (hdev->commands[34] & 0x04)
1758 events[1] |= 0x01; /* LE Generate DHKey Complete */
1760 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1761 events);
1763 if (hdev->commands[25] & 0x40) {
1764 /* Read LE Advertising Channel TX Power */
1765 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1768 hci_set_le_support(req);
1771 /* Read features beyond page 1 if available */
1772 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1773 struct hci_cp_read_local_ext_features cp;
1775 cp.page = p;
1776 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1777 sizeof(cp), &cp);
1781 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1783 struct hci_dev *hdev = req->hdev;
1785 /* Set event mask page 2 if the HCI command for it is supported */
1786 if (hdev->commands[22] & 0x04)
1787 hci_set_event_mask_page_2(req);
1789 /* Read local codec list if the HCI command is supported */
1790 if (hdev->commands[29] & 0x20)
1791 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1793 /* Get MWS transport configuration if the HCI command is supported */
1794 if (hdev->commands[30] & 0x08)
1795 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1797 /* Check for Synchronization Train support */
1798 if (lmp_sync_train_capable(hdev))
1799 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1801 /* Enable Secure Connections if supported and configured */
1802 if (bredr_sc_enabled(hdev)) {
1803 u8 support = 0x01;
1804 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1805 sizeof(support), &support);
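/* Full controller bring-up: run init stages 1-4 synchronously and,
 * during the initial setup phase only, create the debugfs entries
 * declared above.
 */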
1809 static int __hci_init(struct hci_dev *hdev)
1811 int err;
1813 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1814 if (err < 0)
1815 return err;
1817 /* The Device Under Test (DUT) mode is special and available for
1818 * all controller types. So just create it early on.
1820 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1821 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1822 &dut_mode_fops);
1825 /* The HCI_BREDR type covers single-mode LE, single-mode BR/EDR
1826 * and dual-mode BR/EDR/LE controllers. AMP controllers only
1827 * need the first stage init.
1829 if (hdev->dev_type != HCI_BREDR)
1830 return 0;
1832 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1833 if (err < 0)
1834 return err;
1836 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1837 if (err < 0)
1838 return err;
1840 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1841 if (err < 0)
1842 return err;
1844 /* Only create debugfs entries during the initial setup
1845 * phase and not every time the controller gets powered on.
1847 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1848 return 0;
1850 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1851 &features_fops);
1852 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1853 &hdev->manufacturer);
1854 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1855 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1856 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1857 &device_list_fops);
1858 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1859 &blacklist_fops);
1860 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1862 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1863 &conn_info_min_age_fops);
1864 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1865 &conn_info_max_age_fops);
1867 if (lmp_bredr_capable(hdev)) {
1868 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1869 hdev, &inquiry_cache_fops);
1870 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1871 hdev, &link_keys_fops);
1872 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1873 hdev, &dev_class_fops);
1874 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1875 hdev, &voice_setting_fops);
1878 if (lmp_ssp_capable(hdev)) {
1879 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1880 hdev, &auto_accept_delay_fops);
1881 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1882 hdev, &force_sc_support_fops);
1883 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1884 hdev, &sc_only_mode_fops);
1885 if (lmp_le_capable(hdev))
1886 debugfs_create_file("force_lesc_support", 0644,
1887 hdev->debugfs, hdev,
1888 &force_lesc_support_fops);
1891 if (lmp_sniff_capable(hdev)) {
1892 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1893 hdev, &idle_timeout_fops);
1894 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1895 hdev, &sniff_min_interval_fops);
1896 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1897 hdev, &sniff_max_interval_fops);
1900 if (lmp_le_capable(hdev)) {
1901 debugfs_create_file("identity", 0400, hdev->debugfs,
1902 hdev, &identity_fops);
1903 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1904 hdev, &rpa_timeout_fops);
1905 debugfs_create_file("random_address", 0444, hdev->debugfs,
1906 hdev, &random_address_fops);
1907 debugfs_create_file("static_address", 0444, hdev->debugfs,
1908 hdev, &static_address_fops);
1910 /* For controllers with a public address, provide a debug
1911 * option to force the usage of the configured static
1912 * address. By default the public address is used.
1914 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1915 debugfs_create_file("force_static_address", 0644,
1916 hdev->debugfs, hdev,
1917 &force_static_address_fops);
1919 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1920 &hdev->le_white_list_size);
1921 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1922 &white_list_fops);
1923 debugfs_create_file("identity_resolving_keys", 0400,
1924 hdev->debugfs, hdev,
1925 &identity_resolving_keys_fops);
1926 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1927 hdev, &long_term_keys_fops);
1928 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1929 hdev, &conn_min_interval_fops);
1930 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1931 hdev, &conn_max_interval_fops);
1932 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1933 hdev, &conn_latency_fops);
1934 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1935 hdev, &supervision_timeout_fops);
1936 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1937 hdev, &adv_channel_map_fops);
1938 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1939 hdev, &adv_min_interval_fops);
1940 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1941 hdev, &adv_max_interval_fops);
1942 debugfs_create_u16("discov_interleaved_timeout", 0644,
1943 hdev->debugfs,
1944 &hdev->discov_interleaved_timeout);
1946 smp_register(hdev);
1949 return 0;
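/* Minimal init for unconfigured controllers: reset (unless quirked
 * away), read the local version, and read the BD address only when the
 * driver provides a set_bdaddr callback.
 */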
1952 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1954 struct hci_dev *hdev = req->hdev;
1956 BT_DBG("%s %ld", hdev->name, opt);
1958 /* Reset */
1959 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1960 hci_reset_req(req, 0);
1962 /* Read Local Version */
1963 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1965 /* Read BD Address */
1966 if (hdev->set_bdaddr)
1967 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1970 static int __hci_unconf_init(struct hci_dev *hdev)
1972 int err;
1974 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1975 return 0;
1977 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1978 if (err < 0)
1979 return err;
1981 return 0;
1984 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1986 __u8 scan = opt;
1988 BT_DBG("%s %x", req->hdev->name, scan);
1990 /* Inquiry and Page scans */
1991 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1994 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1996 __u8 auth = opt;
1998 BT_DBG("%s %x", req->hdev->name, auth);
2000 /* Authentication */
2001 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
2004 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
2006 __u8 encrypt = opt;
2008 BT_DBG("%s %x", req->hdev->name, encrypt);
2010 /* Encryption */
2011 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
2014 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
2016 __le16 policy = cpu_to_le16(opt);
2018 BT_DBG("%s %x", req->hdev->name, policy);
2020 /* Default link policy */
2021 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2024 /* Get HCI device by index.
2025 * Device is held on return. */
2026 struct hci_dev *hci_dev_get(int index)
2028 struct hci_dev *hdev = NULL, *d;
2030 BT_DBG("%d", index);
2032 if (index < 0)
2033 return NULL;
2035 read_lock(&hci_dev_list_lock);
2036 list_for_each_entry(d, &hci_dev_list, list) {
2037 if (d->id == index) {
2038 hdev = hci_dev_hold(d);
2039 break;
2042 read_unlock(&hci_dev_list_lock);
2043 return hdev;
2046 /* ---- Inquiry support ---- */
2048 bool hci_discovery_active(struct hci_dev *hdev)
2050 struct discovery_state *discov = &hdev->discovery;
2052 switch (discov->state) {
2053 case DISCOVERY_FINDING:
2054 case DISCOVERY_RESOLVING:
2055 return true;
2057 default:
2058 return false;
2062 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2064 int old_state = hdev->discovery.state;
2066 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2068 if (old_state == state)
2069 return;
2071 hdev->discovery.state = state;
2073 switch (state) {
2074 case DISCOVERY_STOPPED:
2075 hci_update_background_scan(hdev);
2077 if (old_state != DISCOVERY_STARTING)
2078 mgmt_discovering(hdev, 0);
2079 break;
2080 case DISCOVERY_STARTING:
2081 break;
2082 case DISCOVERY_FINDING:
2083 mgmt_discovering(hdev, 1);
2084 break;
2085 case DISCOVERY_RESOLVING:
2086 break;
2087 case DISCOVERY_STOPPING:
2088 break;
2092 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2094 struct discovery_state *cache = &hdev->discovery;
2095 struct inquiry_entry *p, *n;
2097 list_for_each_entry_safe(p, n, &cache->all, all) {
2098 list_del(&p->all);
2099 kfree(p);
2102 INIT_LIST_HEAD(&cache->unknown);
2103 INIT_LIST_HEAD(&cache->resolve);
2106 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2107 bdaddr_t *bdaddr)
2109 struct discovery_state *cache = &hdev->discovery;
2110 struct inquiry_entry *e;
2112 BT_DBG("cache %p, %pMR", cache, bdaddr);
2114 list_for_each_entry(e, &cache->all, all) {
2115 if (!bacmp(&e->data.bdaddr, bdaddr))
2116 return e;
2119 return NULL;
2122 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2123 bdaddr_t *bdaddr)
2125 struct discovery_state *cache = &hdev->discovery;
2126 struct inquiry_entry *e;
2128 BT_DBG("cache %p, %pMR", cache, bdaddr);
2130 list_for_each_entry(e, &cache->unknown, list) {
2131 if (!bacmp(&e->data.bdaddr, bdaddr))
2132 return e;
2135 return NULL;
2138 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2139 bdaddr_t *bdaddr,
2140 int state)
2142 struct discovery_state *cache = &hdev->discovery;
2143 struct inquiry_entry *e;
2145 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2147 list_for_each_entry(e, &cache->resolve, list) {
2148 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2149 return e;
2150 if (!bacmp(&e->data.bdaddr, bdaddr))
2151 return e;
2154 return NULL;
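/* Re-insert @ie into the resolve list keeping it ordered by signal
 * strength (strongest first) so that names are resolved for the
 * closest devices first; entries already in NAME_PENDING state keep
 * their place at the head of the list.
 */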
2157 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2158 struct inquiry_entry *ie)
2160 struct discovery_state *cache = &hdev->discovery;
2161 struct list_head *pos = &cache->resolve;
2162 struct inquiry_entry *p;
2164 list_del(&ie->list);
2166 list_for_each_entry(p, &cache->resolve, list) {
2167 if (p->name_state != NAME_PENDING &&
2168 abs(p->data.rssi) >= abs(ie->data.rssi))
2169 break;
2170 pos = &p->list;
2173 list_add(&ie->list, pos);
2176 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2177 bool name_known)
2179 struct discovery_state *cache = &hdev->discovery;
2180 struct inquiry_entry *ie;
2181 u32 flags = 0;
2183 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2185 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2187 if (!data->ssp_mode)
2188 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2190 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2191 if (ie) {
2192 if (!ie->data.ssp_mode)
2193 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2195 if (ie->name_state == NAME_NEEDED &&
2196 data->rssi != ie->data.rssi) {
2197 ie->data.rssi = data->rssi;
2198 hci_inquiry_cache_update_resolve(hdev, ie);
2201 goto update;
2204 /* Entry not in the cache. Add new one. */
2205 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2206 if (!ie) {
2207 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2208 goto done;
2211 list_add(&ie->all, &cache->all);
2213 if (name_known) {
2214 ie->name_state = NAME_KNOWN;
2215 } else {
2216 ie->name_state = NAME_NOT_KNOWN;
2217 list_add(&ie->list, &cache->unknown);
2220 update:
2221 if (name_known && ie->name_state != NAME_KNOWN &&
2222 ie->name_state != NAME_PENDING) {
2223 ie->name_state = NAME_KNOWN;
2224 list_del(&ie->list);
2227 memcpy(&ie->data, data, sizeof(*data));
2228 ie->timestamp = jiffies;
2229 cache->timestamp = jiffies;
2231 if (ie->name_state == NAME_NOT_KNOWN)
2232 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2234 done:
2235 return flags;
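/* A minimal sketch of how a caller on the inquiry result path might
 * consume the flags returned above (illustrative only; the real callers
 * live in the event handling code, not in this file):
 *
 *	u32 flags = hci_inquiry_cache_update(hdev, &data, name_known);
 *
 *	MGMT_DEV_FOUND_CONFIRM_NAME set: ask userspace to confirm the name
 *	before treating it as known.
 *	MGMT_DEV_FOUND_LEGACY_PAIRING set: the remote does not support SSP,
 *	so only legacy pairing is possible.
 */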
2238 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2240 struct discovery_state *cache = &hdev->discovery;
2241 struct inquiry_info *info = (struct inquiry_info *) buf;
2242 struct inquiry_entry *e;
2243 int copied = 0;
2245 list_for_each_entry(e, &cache->all, all) {
2246 struct inquiry_data *data = &e->data;
2248 if (copied >= num)
2249 break;
2251 bacpy(&info->bdaddr, &data->bdaddr);
2252 info->pscan_rep_mode = data->pscan_rep_mode;
2253 info->pscan_period_mode = data->pscan_period_mode;
2254 info->pscan_mode = data->pscan_mode;
2255 memcpy(info->dev_class, data->dev_class, 3);
2256 info->clock_offset = data->clock_offset;
2258 info++;
2259 copied++;
2262 BT_DBG("cache %p, copied %d", cache, copied);
2263 return copied;
2266 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2268 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2269 struct hci_dev *hdev = req->hdev;
2270 struct hci_cp_inquiry cp;
2272 BT_DBG("%s", hdev->name);
2274 if (test_bit(HCI_INQUIRY, &hdev->flags))
2275 return;
2277 /* Start Inquiry */
2278 memcpy(&cp.lap, &ir->lap, 3);
2279 cp.length = ir->length;
2280 cp.num_rsp = ir->num_rsp;
2281 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2284 int hci_inquiry(void __user *arg)
2286 __u8 __user *ptr = arg;
2287 struct hci_inquiry_req ir;
2288 struct hci_dev *hdev;
2289 int err = 0, do_inquiry = 0, max_rsp;
2290 long timeo;
2291 __u8 *buf;
2293 if (copy_from_user(&ir, ptr, sizeof(ir)))
2294 return -EFAULT;
2296 hdev = hci_dev_get(ir.dev_id);
2297 if (!hdev)
2298 return -ENODEV;
2300 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2301 err = -EBUSY;
2302 goto done;
2305 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2306 err = -EOPNOTSUPP;
2307 goto done;
2310 if (hdev->dev_type != HCI_BREDR) {
2311 err = -EOPNOTSUPP;
2312 goto done;
2315 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2316 err = -EOPNOTSUPP;
2317 goto done;
2320 hci_dev_lock(hdev);
2321 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2322 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2323 hci_inquiry_cache_flush(hdev);
2324 do_inquiry = 1;
2326 hci_dev_unlock(hdev);
2328 timeo = ir.length * msecs_to_jiffies(2000);
2330 if (do_inquiry) {
2331 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2332 timeo);
2333 if (err < 0)
2334 goto done;
2336 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2337 * cleared). If it is interrupted by a signal, return -EINTR.
2339 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2340 TASK_INTERRUPTIBLE))
2341 return -EINTR;
2344 /* For an unlimited number of responses, use a buffer with
2345 * 255 entries.
2347 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2349 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2350 * and then copy it to user space.
2352 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2353 if (!buf) {
2354 err = -ENOMEM;
2355 goto done;
2358 hci_dev_lock(hdev);
2359 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2360 hci_dev_unlock(hdev);
2362 BT_DBG("num_rsp %d", ir.num_rsp);
2364 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2365 ptr += sizeof(ir);
2366 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2367 ir.num_rsp))
2368 err = -EFAULT;
2369 } else
2370 err = -EFAULT;
2372 kfree(buf);
2374 done:
2375 hci_dev_put(hdev);
2376 return err;
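/* hci_inquiry() above backs the HCIINQUIRY ioctl on HCI sockets. A rough
 * userspace sketch (an assumption of this comment, not code from this
 * file) places the request header and result buffer back to back:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;			(units of 1.28 seconds)
 *	buf.ir.num_rsp = 255;
 *	buf.ir.lap[0]  = 0x33;			(GIAC 0x9e8b33)
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *
 *	ioctl(sock_fd, HCIINQUIRY, &buf);
 */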
2379 static int hci_dev_do_open(struct hci_dev *hdev)
2381 int ret = 0;
2383 BT_DBG("%s %p", hdev->name, hdev);
2385 hci_req_lock(hdev);
2387 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2388 ret = -ENODEV;
2389 goto done;
2392 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2393 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394 /* Check for rfkill but allow the HCI setup stage to
2395 * proceed (which in itself doesn't cause any RF activity).
2397 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2398 ret = -ERFKILL;
2399 goto done;
2402 /* Check for valid public address or a configured static
2403 * random address, but let the HCI setup proceed to
2404 * be able to determine if there is a public address
2405 * or not.
2407 * In case of user channel usage, it is not important
2408 * if a public address or static random address is
2409 * available.
2411 * This check is only valid for BR/EDR controllers
2412 * since AMP controllers do not have an address.
2414 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2415 hdev->dev_type == HCI_BREDR &&
2416 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2417 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2418 ret = -EADDRNOTAVAIL;
2419 goto done;
2423 if (test_bit(HCI_UP, &hdev->flags)) {
2424 ret = -EALREADY;
2425 goto done;
2428 if (hdev->open(hdev)) {
2429 ret = -EIO;
2430 goto done;
2433 atomic_set(&hdev->cmd_cnt, 1);
2434 set_bit(HCI_INIT, &hdev->flags);
2436 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2437 if (hdev->setup)
2438 ret = hdev->setup(hdev);
2440 /* The transport driver can set these quirks before
2441 * creating the HCI device or in its setup callback.
2443 * In case any of them is set, the controller has to
2444 * start up as unconfigured.
2446 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2447 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2448 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2450 /* For an unconfigured controller it is required to
2451 * read at least the version information provided by
2452 * the Read Local Version Information command.
2454 * If the set_bdaddr driver callback is provided, then
2455 * also the original Bluetooth public device address
2456 * will be read using the Read BD Address command.
2458 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2459 ret = __hci_unconf_init(hdev);
2462 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2463 /* If public address change is configured, ensure that
2464 * the address gets programmed. If the driver does not
2465 * support changing the public address, fail the power
2466 * on procedure.
2468 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2469 hdev->set_bdaddr)
2470 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2471 else
2472 ret = -EADDRNOTAVAIL;
2475 if (!ret) {
2476 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2478 ret = __hci_init(hdev);
2481 clear_bit(HCI_INIT, &hdev->flags);
2483 if (!ret) {
2484 hci_dev_hold(hdev);
2485 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2486 set_bit(HCI_UP, &hdev->flags);
2487 hci_notify(hdev, HCI_DEV_UP);
2488 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2489 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2490 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2491 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2492 hdev->dev_type == HCI_BREDR) {
2493 hci_dev_lock(hdev);
2494 mgmt_powered(hdev, 1);
2495 hci_dev_unlock(hdev);
2497 } else {
2498 /* Init failed, cleanup */
2499 flush_work(&hdev->tx_work);
2500 flush_work(&hdev->cmd_work);
2501 flush_work(&hdev->rx_work);
2503 skb_queue_purge(&hdev->cmd_q);
2504 skb_queue_purge(&hdev->rx_q);
2506 if (hdev->flush)
2507 hdev->flush(hdev);
2509 if (hdev->sent_cmd) {
2510 kfree_skb(hdev->sent_cmd);
2511 hdev->sent_cmd = NULL;
2514 hdev->close(hdev);
2515 hdev->flags &= BIT(HCI_RAW);
2518 done:
2519 hci_req_unlock(hdev);
2520 return ret;
2523 /* ---- HCI ioctl helpers ---- */
2525 int hci_dev_open(__u16 dev)
2527 struct hci_dev *hdev;
2528 int err;
2530 hdev = hci_dev_get(dev);
2531 if (!hdev)
2532 return -ENODEV;
2534 /* Devices that are marked as unconfigured can only be powered
2535 * up as user channel. Trying to bring them up as normal devices
2536 * will result in a failure. Only user channel operation is
2537 * possible.
2539 * When this function is called for a user channel, the flag
2540 * HCI_USER_CHANNEL will be set first before attempting to
2541 * open the device.
2543 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2544 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545 err = -EOPNOTSUPP;
2546 goto done;
2549 /* We need to ensure that no other power on/off work is pending
2550 * before proceeding to call hci_dev_do_open. This is
2551 * particularly important if the setup procedure has not yet
2552 * completed.
2554 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2555 cancel_delayed_work(&hdev->power_off);
2557 /* After this call it is guaranteed that the setup procedure
2558 * has finished. This means that error conditions like RFKILL
2559 * or no valid public or static random address apply.
2561 flush_workqueue(hdev->req_workqueue);
2563 /* For controllers not using the management interface and that
2564 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2565 * so that pairing works for them. Once the management interface
2566 * is in use this bit will be cleared again and userspace has
2567 * to explicitly enable it.
2569 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2570 !test_bit(HCI_MGMT, &hdev->dev_flags))
2571 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2573 err = hci_dev_do_open(hdev);
2575 done:
2576 hci_dev_put(hdev);
2577 return err;
2580 /* This function requires the caller holds hdev->lock */
2581 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2583 struct hci_conn_params *p;
2585 list_for_each_entry(p, &hdev->le_conn_params, list) {
2586 if (p->conn) {
2587 hci_conn_drop(p->conn);
2588 hci_conn_put(p->conn);
2589 p->conn = NULL;
2591 list_del_init(&p->action);
2594 BT_DBG("All LE pending actions cleared");
2597 static int hci_dev_do_close(struct hci_dev *hdev)
2599 BT_DBG("%s %p", hdev->name, hdev);
2601 cancel_delayed_work(&hdev->power_off);
2603 hci_req_cancel(hdev, ENODEV);
2604 hci_req_lock(hdev);
2606 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2607 cancel_delayed_work_sync(&hdev->cmd_timer);
2608 hci_req_unlock(hdev);
2609 return 0;
2612 /* Flush RX and TX works */
2613 flush_work(&hdev->tx_work);
2614 flush_work(&hdev->rx_work);
2616 if (hdev->discov_timeout > 0) {
2617 cancel_delayed_work(&hdev->discov_off);
2618 hdev->discov_timeout = 0;
2619 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2620 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2623 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2624 cancel_delayed_work(&hdev->service_cache);
2626 cancel_delayed_work_sync(&hdev->le_scan_disable);
2628 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2629 cancel_delayed_work_sync(&hdev->rpa_expired);
2631 /* Avoid potential lockdep warnings from the *_flush() calls by
2632 * ensuring the workqueue is empty up front.
2634 drain_workqueue(hdev->workqueue);
2636 hci_dev_lock(hdev);
2638 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2639 if (hdev->dev_type == HCI_BREDR)
2640 mgmt_powered(hdev, 0);
2643 hci_inquiry_cache_flush(hdev);
2644 hci_pend_le_actions_clear(hdev);
2645 hci_conn_hash_flush(hdev);
2646 hci_dev_unlock(hdev);
2648 hci_notify(hdev, HCI_DEV_DOWN);
2650 if (hdev->flush)
2651 hdev->flush(hdev);
2653 /* Reset device */
2654 skb_queue_purge(&hdev->cmd_q);
2655 atomic_set(&hdev->cmd_cnt, 1);
2656 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2657 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2658 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2659 set_bit(HCI_INIT, &hdev->flags);
2660 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2661 clear_bit(HCI_INIT, &hdev->flags);
2664 /* flush cmd work */
2665 flush_work(&hdev->cmd_work);
2667 /* Drop queues */
2668 skb_queue_purge(&hdev->rx_q);
2669 skb_queue_purge(&hdev->cmd_q);
2670 skb_queue_purge(&hdev->raw_q);
2672 /* Drop last sent command */
2673 if (hdev->sent_cmd) {
2674 cancel_delayed_work_sync(&hdev->cmd_timer);
2675 kfree_skb(hdev->sent_cmd);
2676 hdev->sent_cmd = NULL;
2679 kfree_skb(hdev->recv_evt);
2680 hdev->recv_evt = NULL;
2682 /* After this point our queues are empty
2683 * and no tasks are scheduled. */
2684 hdev->close(hdev);
2686 /* Clear flags */
2687 hdev->flags &= BIT(HCI_RAW);
2688 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2690 /* Controller radio is available but is currently powered down */
2691 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2693 memset(hdev->eir, 0, sizeof(hdev->eir));
2694 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2695 bacpy(&hdev->random_addr, BDADDR_ANY);
2697 hci_req_unlock(hdev);
2699 hci_dev_put(hdev);
2700 return 0;
2703 int hci_dev_close(__u16 dev)
2705 struct hci_dev *hdev;
2706 int err;
2708 hdev = hci_dev_get(dev);
2709 if (!hdev)
2710 return -ENODEV;
2712 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2713 err = -EBUSY;
2714 goto done;
2717 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2718 cancel_delayed_work(&hdev->power_off);
2720 err = hci_dev_do_close(hdev);
2722 done:
2723 hci_dev_put(hdev);
2724 return err;
2727 int hci_dev_reset(__u16 dev)
2729 struct hci_dev *hdev;
2730 int ret = 0;
2732 hdev = hci_dev_get(dev);
2733 if (!hdev)
2734 return -ENODEV;
2736 hci_req_lock(hdev);
2738 if (!test_bit(HCI_UP, &hdev->flags)) {
2739 ret = -ENETDOWN;
2740 goto done;
2743 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2744 ret = -EBUSY;
2745 goto done;
2748 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2749 ret = -EOPNOTSUPP;
2750 goto done;
2753 /* Drop queues */
2754 skb_queue_purge(&hdev->rx_q);
2755 skb_queue_purge(&hdev->cmd_q);
2757 /* Avoid potential lockdep warnings from the *_flush() calls by
2758 * ensuring the workqueue is empty up front.
2760 drain_workqueue(hdev->workqueue);
2762 hci_dev_lock(hdev);
2763 hci_inquiry_cache_flush(hdev);
2764 hci_conn_hash_flush(hdev);
2765 hci_dev_unlock(hdev);
2767 if (hdev->flush)
2768 hdev->flush(hdev);
2770 atomic_set(&hdev->cmd_cnt, 1);
2771 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2773 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2775 done:
2776 hci_req_unlock(hdev);
2777 hci_dev_put(hdev);
2778 return ret;
2781 int hci_dev_reset_stat(__u16 dev)
2783 struct hci_dev *hdev;
2784 int ret = 0;
2786 hdev = hci_dev_get(dev);
2787 if (!hdev)
2788 return -ENODEV;
2790 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2791 ret = -EBUSY;
2792 goto done;
2795 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2796 ret = -EOPNOTSUPP;
2797 goto done;
2800 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2802 done:
2803 hci_dev_put(hdev);
2804 return ret;
2807 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2809 bool conn_changed, discov_changed;
2811 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2813 if ((scan & SCAN_PAGE))
2814 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2815 &hdev->dev_flags);
2816 else
2817 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2818 &hdev->dev_flags);
2820 if ((scan & SCAN_INQUIRY)) {
2821 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2822 &hdev->dev_flags);
2823 } else {
2824 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2825 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2826 &hdev->dev_flags);
2829 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2830 return;
2832 if (conn_changed || discov_changed) {
2833 /* In case this was disabled through mgmt */
2834 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2836 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2837 mgmt_update_adv_data(hdev);
2839 mgmt_new_settings(hdev);
2843 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2845 struct hci_dev *hdev;
2846 struct hci_dev_req dr;
2847 int err = 0;
2849 if (copy_from_user(&dr, arg, sizeof(dr)))
2850 return -EFAULT;
2852 hdev = hci_dev_get(dr.dev_id);
2853 if (!hdev)
2854 return -ENODEV;
2856 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2857 err = -EBUSY;
2858 goto done;
2861 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2862 err = -EOPNOTSUPP;
2863 goto done;
2866 if (hdev->dev_type != HCI_BREDR) {
2867 err = -EOPNOTSUPP;
2868 goto done;
2871 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2872 err = -EOPNOTSUPP;
2873 goto done;
2876 switch (cmd) {
2877 case HCISETAUTH:
2878 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2879 HCI_INIT_TIMEOUT);
2880 break;
2882 case HCISETENCRYPT:
2883 if (!lmp_encrypt_capable(hdev)) {
2884 err = -EOPNOTSUPP;
2885 break;
2888 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2889 /* Auth must be enabled first */
2890 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2891 HCI_INIT_TIMEOUT);
2892 if (err)
2893 break;
2896 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2897 HCI_INIT_TIMEOUT);
2898 break;
2900 case HCISETSCAN:
2901 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2902 HCI_INIT_TIMEOUT);
2904 /* Ensure that the connectable and discoverable states
2905 * get correctly modified as this was a non-mgmt change.
2907 if (!err)
2908 hci_update_scan_state(hdev, dr.dev_opt);
2909 break;
2911 case HCISETLINKPOL:
2912 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2913 HCI_INIT_TIMEOUT);
2914 break;
2916 case HCISETLINKMODE:
2917 hdev->link_mode = ((__u16) dr.dev_opt) &
2918 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2919 break;
2921 case HCISETPTYPE:
2922 hdev->pkt_type = (__u16) dr.dev_opt;
2923 break;
2925 case HCISETACLMTU:
2926 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2927 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2928 break;
2930 case HCISETSCOMTU:
2931 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2932 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2933 break;
2935 default:
2936 err = -EINVAL;
2937 break;
2940 done:
2941 hci_dev_put(hdev);
2942 return err;
2945 int hci_get_dev_list(void __user *arg)
2947 struct hci_dev *hdev;
2948 struct hci_dev_list_req *dl;
2949 struct hci_dev_req *dr;
2950 int n = 0, size, err;
2951 __u16 dev_num;
2953 if (get_user(dev_num, (__u16 __user *) arg))
2954 return -EFAULT;
2956 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2957 return -EINVAL;
2959 size = sizeof(*dl) + dev_num * sizeof(*dr);
2961 dl = kzalloc(size, GFP_KERNEL);
2962 if (!dl)
2963 return -ENOMEM;
2965 dr = dl->dev_req;
2967 read_lock(&hci_dev_list_lock);
2968 list_for_each_entry(hdev, &hci_dev_list, list) {
2969 unsigned long flags = hdev->flags;
2971 /* When the auto-off is configured it means the transport
2972 * is running, but in that case still indicate that the
2973 * device is actually down.
2975 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2976 flags &= ~BIT(HCI_UP);
2978 (dr + n)->dev_id = hdev->id;
2979 (dr + n)->dev_opt = flags;
2981 if (++n >= dev_num)
2982 break;
2984 read_unlock(&hci_dev_list_lock);
2986 dl->dev_num = n;
2987 size = sizeof(*dl) + n * sizeof(*dr);
2989 err = copy_to_user(arg, dl, size);
2990 kfree(dl);
2992 return err ? -EFAULT : 0;
2995 int hci_get_dev_info(void __user *arg)
2997 struct hci_dev *hdev;
2998 struct hci_dev_info di;
2999 unsigned long flags;
3000 int err = 0;
3002 if (copy_from_user(&di, arg, sizeof(di)))
3003 return -EFAULT;
3005 hdev = hci_dev_get(di.dev_id);
3006 if (!hdev)
3007 return -ENODEV;
3009 /* When the auto-off is configured it means the transport
3010 * is running, but in that case still indicate that the
3011 * device is actually down.
3013 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3014 flags = hdev->flags & ~BIT(HCI_UP);
3015 else
3016 flags = hdev->flags;
3018 strcpy(di.name, hdev->name);
3019 di.bdaddr = hdev->bdaddr;
3020 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3021 di.flags = flags;
3022 di.pkt_type = hdev->pkt_type;
3023 if (lmp_bredr_capable(hdev)) {
3024 di.acl_mtu = hdev->acl_mtu;
3025 di.acl_pkts = hdev->acl_pkts;
3026 di.sco_mtu = hdev->sco_mtu;
3027 di.sco_pkts = hdev->sco_pkts;
3028 } else {
3029 di.acl_mtu = hdev->le_mtu;
3030 di.acl_pkts = hdev->le_pkts;
3031 di.sco_mtu = 0;
3032 di.sco_pkts = 0;
3034 di.link_policy = hdev->link_policy;
3035 di.link_mode = hdev->link_mode;
3037 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3038 memcpy(&di.features, &hdev->features, sizeof(di.features));
3040 if (copy_to_user(arg, &di, sizeof(di)))
3041 err = -EFAULT;
3043 hci_dev_put(hdev);
3045 return err;
3048 /* ---- Interface to HCI drivers ---- */
3050 static int hci_rfkill_set_block(void *data, bool blocked)
3052 struct hci_dev *hdev = data;
3054 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3056 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3057 return -EBUSY;
3059 if (blocked) {
3060 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3061 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3062 !test_bit(HCI_CONFIG, &hdev->dev_flags))
3063 hci_dev_do_close(hdev);
3064 } else {
3065 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3068 return 0;
3071 static const struct rfkill_ops hci_rfkill_ops = {
3072 .set_block = hci_rfkill_set_block,
3075 static void hci_power_on(struct work_struct *work)
3077 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3078 int err;
3080 BT_DBG("%s", hdev->name);
3082 err = hci_dev_do_open(hdev);
3083 if (err < 0) {
3084 hci_dev_lock(hdev);
3085 mgmt_set_powered_failed(hdev, err);
3086 hci_dev_unlock(hdev);
3087 return;
3090 /* During the HCI setup phase, a few error conditions are
3091 * ignored and they need to be checked now. If they are still
3092 * valid, it is important to turn the device back off.
3094 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3095 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3096 (hdev->dev_type == HCI_BREDR &&
3097 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3098 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3099 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3100 hci_dev_do_close(hdev);
3101 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3102 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3103 HCI_AUTO_OFF_TIMEOUT);
3106 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3107 /* For unconfigured devices, set the HCI_RAW flag
3108 * so that userspace can easily identify them.
3110 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3111 set_bit(HCI_RAW, &hdev->flags);
3113 /* For fully configured devices, this will send
3114 * the Index Added event. For unconfigured devices,
3115 * it will send the Unconfigured Index Added event.
3117 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3118 * and no event will be sent.
3120 mgmt_index_added(hdev);
3121 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3122 /* Now that the controller is configured, it is
3123 * important to clear the HCI_RAW flag.
3125 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3126 clear_bit(HCI_RAW, &hdev->flags);
3128 /* Powering on the controller with HCI_CONFIG set only
3129 * happens with the transition from unconfigured to
3130 * configured. This will send the Index Added event.
3132 mgmt_index_added(hdev);
3136 static void hci_power_off(struct work_struct *work)
3138 struct hci_dev *hdev = container_of(work, struct hci_dev,
3139 power_off.work);
3141 BT_DBG("%s", hdev->name);
3143 hci_dev_do_close(hdev);
3146 static void hci_discov_off(struct work_struct *work)
3148 struct hci_dev *hdev;
3150 hdev = container_of(work, struct hci_dev, discov_off.work);
3152 BT_DBG("%s", hdev->name);
3154 mgmt_discoverable_timeout(hdev);
3157 void hci_uuids_clear(struct hci_dev *hdev)
3159 struct bt_uuid *uuid, *tmp;
3161 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3162 list_del(&uuid->list);
3163 kfree(uuid);
3167 void hci_link_keys_clear(struct hci_dev *hdev)
3169 struct link_key *key;
3171 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3172 list_del_rcu(&key->list);
3173 kfree_rcu(key, rcu);
3177 void hci_smp_ltks_clear(struct hci_dev *hdev)
3179 struct smp_ltk *k;
3181 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3182 list_del_rcu(&k->list);
3183 kfree_rcu(k, rcu);
3187 void hci_smp_irks_clear(struct hci_dev *hdev)
3189 struct smp_irk *k;
3191 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3192 list_del_rcu(&k->list);
3193 kfree_rcu(k, rcu);
3197 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3199 struct link_key *k;
3201 rcu_read_lock();
3202 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3203 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3204 rcu_read_unlock();
3205 return k;
3208 rcu_read_unlock();
3210 return NULL;
3213 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3214 u8 key_type, u8 old_key_type)
3216 /* Legacy key */
3217 if (key_type < 0x03)
3218 return true;
3220 /* Debug keys are insecure so don't store them persistently */
3221 if (key_type == HCI_LK_DEBUG_COMBINATION)
3222 return false;
3224 /* Changed combination key and there's no previous one */
3225 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3226 return false;
3228 /* Security mode 3 case */
3229 if (!conn)
3230 return true;
3232 /* BR/EDR key derived using SC from an LE link */
3233 if (conn->type == LE_LINK)
3234 return true;
3236 /* Neither the local nor the remote side requested no-bonding */
3237 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3238 return true;
3240 /* Local side had dedicated bonding as requirement */
3241 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3242 return true;
3244 /* Remote side had dedicated bonding as requirement */
3245 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3246 return true;
3248 /* If none of the above criteria match, then don't store the key
3249 * persistently */
3250 return false;
3253 static u8 ltk_role(u8 type)
3255 if (type == SMP_LTK)
3256 return HCI_ROLE_MASTER;
3258 return HCI_ROLE_SLAVE;
3261 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262 u8 addr_type, u8 role)
3264 struct smp_ltk *k;
3266 rcu_read_lock();
3267 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3268 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3269 continue;
3271 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
3272 rcu_read_unlock();
3273 return k;
3276 rcu_read_unlock();
3278 return NULL;
3281 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3283 struct smp_irk *irk;
3285 rcu_read_lock();
3286 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3287 if (!bacmp(&irk->rpa, rpa)) {
3288 rcu_read_unlock();
3289 return irk;
3293 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3294 if (smp_irk_matches(hdev, irk->val, rpa)) {
3295 bacpy(&irk->rpa, rpa);
3296 rcu_read_unlock();
3297 return irk;
3300 rcu_read_unlock();
3302 return NULL;
3305 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3306 u8 addr_type)
3308 struct smp_irk *irk;
3310 /* Identity Address must be public or static random */
3311 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3312 return NULL;
3314 rcu_read_lock();
3315 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3316 if (addr_type == irk->addr_type &&
3317 bacmp(bdaddr, &irk->bdaddr) == 0) {
3318 rcu_read_unlock();
3319 return irk;
3322 rcu_read_unlock();
3324 return NULL;
3327 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3328 bdaddr_t *bdaddr, u8 *val, u8 type,
3329 u8 pin_len, bool *persistent)
3331 struct link_key *key, *old_key;
3332 u8 old_key_type;
3334 old_key = hci_find_link_key(hdev, bdaddr);
3335 if (old_key) {
3336 old_key_type = old_key->type;
3337 key = old_key;
3338 } else {
3339 old_key_type = conn ? conn->key_type : 0xff;
3340 key = kzalloc(sizeof(*key), GFP_KERNEL);
3341 if (!key)
3342 return NULL;
3343 list_add_rcu(&key->list, &hdev->link_keys);
3346 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3348 /* Some buggy controller combinations generate a changed
3349 * combination key for legacy pairing even when there's no
3350 * previous key */
3351 if (type == HCI_LK_CHANGED_COMBINATION &&
3352 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3353 type = HCI_LK_COMBINATION;
3354 if (conn)
3355 conn->key_type = type;
3358 bacpy(&key->bdaddr, bdaddr);
3359 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3360 key->pin_len = pin_len;
3362 if (type == HCI_LK_CHANGED_COMBINATION)
3363 key->type = old_key_type;
3364 else
3365 key->type = type;
3367 if (persistent)
3368 *persistent = hci_persistent_key(hdev, conn, type,
3369 old_key_type);
3371 return key;
3374 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3375 u8 addr_type, u8 type, u8 authenticated,
3376 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3378 struct smp_ltk *key, *old_key;
3379 u8 role = ltk_role(type);
3381 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3382 if (old_key)
3383 key = old_key;
3384 else {
3385 key = kzalloc(sizeof(*key), GFP_KERNEL);
3386 if (!key)
3387 return NULL;
3388 list_add_rcu(&key->list, &hdev->long_term_keys);
3391 bacpy(&key->bdaddr, bdaddr);
3392 key->bdaddr_type = addr_type;
3393 memcpy(key->val, tk, sizeof(key->val));
3394 key->authenticated = authenticated;
3395 key->ediv = ediv;
3396 key->rand = rand;
3397 key->enc_size = enc_size;
3398 key->type = type;
3400 return key;
3403 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3404 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3406 struct smp_irk *irk;
3408 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3409 if (!irk) {
3410 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3411 if (!irk)
3412 return NULL;
3414 bacpy(&irk->bdaddr, bdaddr);
3415 irk->addr_type = addr_type;
3417 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3420 memcpy(irk->val, val, 16);
3421 bacpy(&irk->rpa, rpa);
3423 return irk;
3426 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3428 struct link_key *key;
3430 key = hci_find_link_key(hdev, bdaddr);
3431 if (!key)
3432 return -ENOENT;
3434 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3436 list_del_rcu(&key->list);
3437 kfree_rcu(key, rcu);
3439 return 0;
3442 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3444 struct smp_ltk *k;
3445 int removed = 0;
3447 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3448 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3449 continue;
3451 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3453 list_del_rcu(&k->list);
3454 kfree_rcu(k, rcu);
3455 removed++;
3458 return removed ? 0 : -ENOENT;
3461 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3463 struct smp_irk *k;
3465 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3466 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3467 continue;
3469 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3471 list_del_rcu(&k->list);
3472 kfree_rcu(k, rcu);
3476 /* HCI command timer function */
3477 static void hci_cmd_timeout(struct work_struct *work)
3479 struct hci_dev *hdev = container_of(work, struct hci_dev,
3480 cmd_timer.work);
3482 if (hdev->sent_cmd) {
3483 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3484 u16 opcode = __le16_to_cpu(sent->opcode);
3486 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3487 } else {
3488 BT_ERR("%s command tx timeout", hdev->name);
3491 atomic_set(&hdev->cmd_cnt, 1);
3492 queue_work(hdev->workqueue, &hdev->cmd_work);
3495 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3496 bdaddr_t *bdaddr, u8 bdaddr_type)
3498 struct oob_data *data;
3500 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3501 if (bacmp(bdaddr, &data->bdaddr) != 0)
3502 continue;
3503 if (data->bdaddr_type != bdaddr_type)
3504 continue;
3505 return data;
3508 return NULL;
3511 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3512 u8 bdaddr_type)
3514 struct oob_data *data;
3516 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3517 if (!data)
3518 return -ENOENT;
3520 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3522 list_del(&data->list);
3523 kfree(data);
3525 return 0;
3528 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3530 struct oob_data *data, *n;
3532 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3533 list_del(&data->list);
3534 kfree(data);
3538 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3539 u8 bdaddr_type, u8 *hash192, u8 *rand192,
3540 u8 *hash256, u8 *rand256)
3542 struct oob_data *data;
3544 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3545 if (!data) {
3546 data = kmalloc(sizeof(*data), GFP_KERNEL);
3547 if (!data)
3548 return -ENOMEM;
3550 bacpy(&data->bdaddr, bdaddr);
3551 data->bdaddr_type = bdaddr_type;
3552 list_add(&data->list, &hdev->remote_oob_data);
3555 if (hash192 && rand192) {
3556 memcpy(data->hash192, hash192, sizeof(data->hash192));
3557 memcpy(data->rand192, rand192, sizeof(data->rand192));
3558 } else {
3559 memset(data->hash192, 0, sizeof(data->hash192));
3560 memset(data->rand192, 0, sizeof(data->rand192));
3563 if (hash256 && rand256) {
3564 memcpy(data->hash256, hash256, sizeof(data->hash256));
3565 memcpy(data->rand256, rand256, sizeof(data->rand256));
3566 } else {
3567 memset(data->hash256, 0, sizeof(data->hash256));
3568 memset(data->rand256, 0, sizeof(data->rand256));
3571 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3573 return 0;
3576 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3577 bdaddr_t *bdaddr, u8 type)
3579 struct bdaddr_list *b;
3581 list_for_each_entry(b, bdaddr_list, list) {
3582 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3583 return b;
3586 return NULL;
3589 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3591 struct list_head *p, *n;
3593 list_for_each_safe(p, n, bdaddr_list) {
3594 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3596 list_del(p);
3597 kfree(b);
3601 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3603 struct bdaddr_list *entry;
3605 if (!bacmp(bdaddr, BDADDR_ANY))
3606 return -EBADF;
3608 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3609 return -EEXIST;
3611 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3612 if (!entry)
3613 return -ENOMEM;
3615 bacpy(&entry->bdaddr, bdaddr);
3616 entry->bdaddr_type = type;
3618 list_add(&entry->list, list);
3620 return 0;
3623 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3625 struct bdaddr_list *entry;
3627 if (!bacmp(bdaddr, BDADDR_ANY)) {
3628 hci_bdaddr_list_clear(list);
3629 return 0;
3632 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3633 if (!entry)
3634 return -ENOENT;
3636 list_del(&entry->list);
3637 kfree(entry);
3639 return 0;
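/* A minimal usage sketch for the bdaddr list helpers above, here against
 * the LE white list (illustrative only; locking is the caller's
 * responsibility, exactly as for the surrounding code):
 *
 *	err = hci_bdaddr_list_add(&hdev->le_white_list, &addr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	-EEXIST means the entry was already present.
 *
 *	hci_bdaddr_list_del(&hdev->le_white_list, &addr, ADDR_LE_DEV_PUBLIC);
 *	Passing BDADDR_ANY instead clears the whole list.
 */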
3642 /* This function requires the caller holds hdev->lock */
3643 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3644 bdaddr_t *addr, u8 addr_type)
3646 struct hci_conn_params *params;
3648 /* The conn params list only contains identity addresses */
3649 if (!hci_is_identity_address(addr, addr_type))
3650 return NULL;
3652 list_for_each_entry(params, &hdev->le_conn_params, list) {
3653 if (bacmp(&params->addr, addr) == 0 &&
3654 params->addr_type == addr_type) {
3655 return params;
3659 return NULL;
3662 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3664 struct hci_conn *conn;
3666 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3667 if (!conn)
3668 return false;
3670 if (conn->dst_type != type)
3671 return false;
3673 if (conn->state != BT_CONNECTED)
3674 return false;
3676 return true;
3679 /* This function requires the caller holds hdev->lock */
3680 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3681 bdaddr_t *addr, u8 addr_type)
3683 struct hci_conn_params *param;
3685 /* The list only contains identity addresses */
3686 if (!hci_is_identity_address(addr, addr_type))
3687 return NULL;
3689 list_for_each_entry(param, list, action) {
3690 if (bacmp(&param->addr, addr) == 0 &&
3691 param->addr_type == addr_type)
3692 return param;
3695 return NULL;
3698 /* This function requires the caller holds hdev->lock */
3699 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3700 bdaddr_t *addr, u8 addr_type)
3702 struct hci_conn_params *params;
3704 if (!hci_is_identity_address(addr, addr_type))
3705 return NULL;
3707 params = hci_conn_params_lookup(hdev, addr, addr_type);
3708 if (params)
3709 return params;
3711 params = kzalloc(sizeof(*params), GFP_KERNEL);
3712 if (!params) {
3713 BT_ERR("Out of memory");
3714 return NULL;
3717 bacpy(&params->addr, addr);
3718 params->addr_type = addr_type;
3720 list_add(&params->list, &hdev->le_conn_params);
3721 INIT_LIST_HEAD(&params->action);
3723 params->conn_min_interval = hdev->le_conn_min_interval;
3724 params->conn_max_interval = hdev->le_conn_max_interval;
3725 params->conn_latency = hdev->le_conn_latency;
3726 params->supervision_timeout = hdev->le_supv_timeout;
3727 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3729 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3731 return params;
3734 /* This function requires the caller holds hdev->lock */
3735 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3736 u8 auto_connect)
3738 struct hci_conn_params *params;
3740 params = hci_conn_params_add(hdev, addr, addr_type);
3741 if (!params)
3742 return -EIO;
3744 if (params->auto_connect == auto_connect)
3745 return 0;
3747 list_del_init(&params->action);
3749 switch (auto_connect) {
3750 case HCI_AUTO_CONN_DISABLED:
3751 case HCI_AUTO_CONN_LINK_LOSS:
3752 hci_update_background_scan(hdev);
3753 break;
3754 case HCI_AUTO_CONN_REPORT:
3755 list_add(&params->action, &hdev->pend_le_reports);
3756 hci_update_background_scan(hdev);
3757 break;
3758 case HCI_AUTO_CONN_DIRECT:
3759 case HCI_AUTO_CONN_ALWAYS:
3760 if (!is_connected(hdev, addr, addr_type)) {
3761 list_add(&params->action, &hdev->pend_le_conns);
3762 hci_update_background_scan(hdev);
3764 break;
3767 params->auto_connect = auto_connect;
3769 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3770 auto_connect);
3772 return 0;
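/* A minimal sketch of configuring auto-connection for an LE device
 * (illustrative; as noted above the caller must hold hdev->lock):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * HCI_AUTO_CONN_REPORT only reports the device over mgmt, while
 * HCI_AUTO_CONN_DIRECT and HCI_AUTO_CONN_ALWAYS queue it for a background
 * connection attempt.
 */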
3775 static void hci_conn_params_free(struct hci_conn_params *params)
3777 if (params->conn) {
3778 hci_conn_drop(params->conn);
3779 hci_conn_put(params->conn);
3782 list_del(&params->action);
3783 list_del(&params->list);
3784 kfree(params);
3787 /* This function requires the caller holds hdev->lock */
3788 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3790 struct hci_conn_params *params;
3792 params = hci_conn_params_lookup(hdev, addr, addr_type);
3793 if (!params)
3794 return;
3796 hci_conn_params_free(params);
3798 hci_update_background_scan(hdev);
3800 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3803 /* This function requires the caller holds hdev->lock */
3804 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3806 struct hci_conn_params *params, *tmp;
3808 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3809 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3810 continue;
3811 list_del(&params->list);
3812 kfree(params);
3815 BT_DBG("All LE disabled connection parameters were removed");
3818 /* This function requires the caller holds hdev->lock */
3819 void hci_conn_params_clear_all(struct hci_dev *hdev)
3821 struct hci_conn_params *params, *tmp;
3823 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3824 hci_conn_params_free(params);
3826 hci_update_background_scan(hdev);
3828 BT_DBG("All LE connection parameters were removed");
3831 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3833 if (status) {
3834 BT_ERR("Failed to start inquiry: status %d", status);
3836 hci_dev_lock(hdev);
3837 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838 hci_dev_unlock(hdev);
3839 return;
3843 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3845 /* General inquiry access code (GIAC) */
3846 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3847 struct hci_request req;
3848 struct hci_cp_inquiry cp;
3849 int err;
3851 if (status) {
3852 BT_ERR("Failed to disable LE scanning: status %d", status);
3853 return;
3856 switch (hdev->discovery.type) {
3857 case DISCOV_TYPE_LE:
3858 hci_dev_lock(hdev);
3859 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3860 hci_dev_unlock(hdev);
3861 break;
3863 case DISCOV_TYPE_INTERLEAVED:
3864 hci_req_init(&req, hdev);
3866 memset(&cp, 0, sizeof(cp));
3867 memcpy(&cp.lap, lap, sizeof(cp.lap));
3868 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3869 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3871 hci_dev_lock(hdev);
3873 hci_inquiry_cache_flush(hdev);
3875 err = hci_req_run(&req, inquiry_complete);
3876 if (err) {
3877 BT_ERR("Inquiry request failed: err %d", err);
3878 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3881 hci_dev_unlock(hdev);
3882 break;
3886 static void le_scan_disable_work(struct work_struct *work)
3888 struct hci_dev *hdev = container_of(work, struct hci_dev,
3889 le_scan_disable.work);
3890 struct hci_request req;
3891 int err;
3893 BT_DBG("%s", hdev->name);
3895 hci_req_init(&req, hdev);
3897 hci_req_add_le_scan_disable(&req);
3899 err = hci_req_run(&req, le_scan_disable_work_complete);
3900 if (err)
3901 BT_ERR("Disable LE scanning request failed: err %d", err);
3904 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3906 struct hci_dev *hdev = req->hdev;
3908 /* If we're advertising or initiating an LE connection we can't
3909 * go ahead and change the random address at this time. This is
3910 * because the eventual initiator address used for the
3911 * subsequently created connection will be undefined (some
3912 * controllers use the new address and others the one we had
3913 * when the operation started).
3915 * In this kind of scenario skip the update and let the random
3916 * address be updated at the next cycle.
3918 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3919 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3920 BT_DBG("Deferring random address update");
3921 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3922 return;
3925 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3928 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3929 u8 *own_addr_type)
3931 struct hci_dev *hdev = req->hdev;
3932 int err;
3934 /* If privacy is enabled use a resolvable private address. If
3935 * the current RPA has expired or something other than the
3936 * current RPA is in use, then generate a new one.
3938 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3939 int to;
3941 *own_addr_type = ADDR_LE_DEV_RANDOM;
3943 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3944 !bacmp(&hdev->random_addr, &hdev->rpa))
3945 return 0;
3947 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3948 if (err < 0) {
3949 BT_ERR("%s failed to generate new RPA", hdev->name);
3950 return err;
3953 set_random_addr(req, &hdev->rpa);
3955 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3956 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3958 return 0;
3961 /* In case of required privacy without resolvable private address,
3962 * use a non-resolvable private address. This is useful for active
3963 * scanning and non-connectable advertising.
3965 if (require_privacy) {
3966 bdaddr_t nrpa;
3968 while (true) {
3969 /* The non-resolvable private address is generated
3970 * from six random bytes with the two most significant
3971 * bits cleared.
3973 get_random_bytes(&nrpa, 6);
3974 nrpa.b[5] &= 0x3f;
3976 /* The non-resolvable private address shall not be
3977 * equal to the public address.
3979 if (bacmp(&hdev->bdaddr, &nrpa))
3980 break;
3983 *own_addr_type = ADDR_LE_DEV_RANDOM;
3984 set_random_addr(req, &nrpa);
3985 return 0;
3988 /* If the force static address option is in use or there is no
3989 * public address, use the static address as the random address
3990 * (but skip the HCI command if the current random address is
3991 * already the static one).
3993 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3994 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3995 *own_addr_type = ADDR_LE_DEV_RANDOM;
3996 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3997 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3998 &hdev->static_addr);
3999 return 0;
4002 /* Neither privacy nor static address is being used so use a
4003 * public address.
4005 *own_addr_type = ADDR_LE_DEV_PUBLIC;
4007 return 0;
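/* A minimal sketch of how a request builder might use the helper above
 * when preparing LE scan or advertising parameters (illustrative only;
 * the resulting own_addr_type is then placed into the corresponding HCI
 * command parameters):
 *
 *	struct hci_request req;
 *	u8 own_addr_type;
 *
 *	hci_req_init(&req, hdev);
 *	if (hci_update_random_address(&req, false, &own_addr_type) < 0)
 *		return;
 *	... queue the scan/advertising parameter commands using
 *	    own_addr_type ...
 *	hci_req_run(&req, complete_cb);
 */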
4010 /* Copy the Identity Address of the controller.
4012 * If the controller has a public BD_ADDR, then by default use that one.
4013 * If this is an LE-only controller without a public address, default to
4014 * the static random address.
4016 * For debugging purposes it is possible to force controllers with a
4017 * public address to use the static random address instead.
4019 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4020 u8 *bdaddr_type)
4022 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
4023 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4024 bacpy(bdaddr, &hdev->static_addr);
4025 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4026 } else {
4027 bacpy(bdaddr, &hdev->bdaddr);
4028 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4032 /* Alloc HCI device */
4033 struct hci_dev *hci_alloc_dev(void)
4035 struct hci_dev *hdev;
4037 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
4038 if (!hdev)
4039 return NULL;
4041 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4042 hdev->esco_type = (ESCO_HV1);
4043 hdev->link_mode = (HCI_LM_ACCEPT);
4044 hdev->num_iac = 0x01; /* One IAC support is mandatory */
4045 hdev->io_capability = 0x03; /* No Input No Output */
4046 hdev->manufacturer = 0xffff; /* Default to internal use */
4047 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4048 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4050 hdev->sniff_max_interval = 800;
4051 hdev->sniff_min_interval = 80;
4053 hdev->le_adv_channel_map = 0x07;
4054 hdev->le_adv_min_interval = 0x0800;
4055 hdev->le_adv_max_interval = 0x0800;
4056 hdev->le_scan_interval = 0x0060;
4057 hdev->le_scan_window = 0x0030;
4058 hdev->le_conn_min_interval = 0x0028;
4059 hdev->le_conn_max_interval = 0x0038;
4060 hdev->le_conn_latency = 0x0000;
4061 hdev->le_supv_timeout = 0x002a;
4063 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4064 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4065 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4066 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4068 mutex_init(&hdev->lock);
4069 mutex_init(&hdev->req_lock);
4071 INIT_LIST_HEAD(&hdev->mgmt_pending);
4072 INIT_LIST_HEAD(&hdev->blacklist);
4073 INIT_LIST_HEAD(&hdev->whitelist);
4074 INIT_LIST_HEAD(&hdev->uuids);
4075 INIT_LIST_HEAD(&hdev->link_keys);
4076 INIT_LIST_HEAD(&hdev->long_term_keys);
4077 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4078 INIT_LIST_HEAD(&hdev->remote_oob_data);
4079 INIT_LIST_HEAD(&hdev->le_white_list);
4080 INIT_LIST_HEAD(&hdev->le_conn_params);
4081 INIT_LIST_HEAD(&hdev->pend_le_conns);
4082 INIT_LIST_HEAD(&hdev->pend_le_reports);
4083 INIT_LIST_HEAD(&hdev->conn_hash.list);
4085 INIT_WORK(&hdev->rx_work, hci_rx_work);
4086 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4087 INIT_WORK(&hdev->tx_work, hci_tx_work);
4088 INIT_WORK(&hdev->power_on, hci_power_on);
4090 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4091 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4092 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4094 skb_queue_head_init(&hdev->rx_q);
4095 skb_queue_head_init(&hdev->cmd_q);
4096 skb_queue_head_init(&hdev->raw_q);
4098 init_waitqueue_head(&hdev->req_wait_q);
4100 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4102 hci_init_sysfs(hdev);
4103 discovery_init(hdev);
4105 return hdev;
4107 EXPORT_SYMBOL(hci_alloc_dev);
4109 /* Free HCI device */
4110 void hci_free_dev(struct hci_dev *hdev)
4112 /* will free via device release */
4113 put_device(&hdev->dev);
4115 EXPORT_SYMBOL(hci_free_dev);
4117 /* Register HCI device */
4118 int hci_register_dev(struct hci_dev *hdev)
4120 int id, error;
4122 if (!hdev->open || !hdev->close || !hdev->send)
4123 return -EINVAL;
4125 /* Do not allow HCI_AMP devices to register at index 0,
4126 * so the index can be used as the AMP controller ID.
4128 switch (hdev->dev_type) {
4129 case HCI_BREDR:
4130 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4131 break;
4132 case HCI_AMP:
4133 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4134 break;
4135 default:
4136 return -EINVAL;
4139 if (id < 0)
4140 return id;
4142 sprintf(hdev->name, "hci%d", id);
4143 hdev->id = id;
4145 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4147 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4148 WQ_MEM_RECLAIM, 1, hdev->name);
4149 if (!hdev->workqueue) {
4150 error = -ENOMEM;
4151 goto err;
4154 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4155 WQ_MEM_RECLAIM, 1, hdev->name);
4156 if (!hdev->req_workqueue) {
4157 destroy_workqueue(hdev->workqueue);
4158 error = -ENOMEM;
4159 goto err;
4162 if (!IS_ERR_OR_NULL(bt_debugfs))
4163 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4165 dev_set_name(&hdev->dev, "%s", hdev->name);
4167 error = device_add(&hdev->dev);
4168 if (error < 0)
4169 goto err_wqueue;
4171 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4172 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4173 hdev);
4174 if (hdev->rfkill) {
4175 if (rfkill_register(hdev->rfkill) < 0) {
4176 rfkill_destroy(hdev->rfkill);
4177 hdev->rfkill = NULL;
4181 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4182 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4184 set_bit(HCI_SETUP, &hdev->dev_flags);
4185 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4187 if (hdev->dev_type == HCI_BREDR) {
4188 /* Assume BR/EDR support until proven otherwise (such as
4189 * through reading supported features during init).
4191 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4194 write_lock(&hci_dev_list_lock);
4195 list_add(&hdev->list, &hci_dev_list);
4196 write_unlock(&hci_dev_list_lock);
4198 /* Devices that are marked for raw-only usage are unconfigured
4199 * and should not be included in normal operation.
4201 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4202 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4204 hci_notify(hdev, HCI_DEV_REG);
4205 hci_dev_hold(hdev);
4207 queue_work(hdev->req_workqueue, &hdev->power_on);
4209 return id;
4211 err_wqueue:
4212 destroy_workqueue(hdev->workqueue);
4213 destroy_workqueue(hdev->req_workqueue);
4214 err:
4215 ida_simple_remove(&hci_index_ida, hdev->id);
4217 return error;
4219 EXPORT_SYMBOL(hci_register_dev);
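/* A condensed sketch of the driver-side registration sequence that
 * hci_register_dev() expects (modelled on existing HCI drivers; the
 * my_*() callbacks and the probe context are assumptions of this example):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */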
4221 /* Unregister HCI device */
4222 void hci_unregister_dev(struct hci_dev *hdev)
4224 int i, id;
4226 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4228 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4230 id = hdev->id;
4232 write_lock(&hci_dev_list_lock);
4233 list_del(&hdev->list);
4234 write_unlock(&hci_dev_list_lock);
4236 hci_dev_do_close(hdev);
4238 for (i = 0; i < NUM_REASSEMBLY; i++)
4239 kfree_skb(hdev->reassembly[i]);
4241 cancel_work_sync(&hdev->power_on);
4243 if (!test_bit(HCI_INIT, &hdev->flags) &&
4244 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4245 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4246 hci_dev_lock(hdev);
4247 mgmt_index_removed(hdev);
4248 hci_dev_unlock(hdev);
4251 /* mgmt_index_removed should take care of emptying the
4252 * pending list */
4253 BUG_ON(!list_empty(&hdev->mgmt_pending));
4255 hci_notify(hdev, HCI_DEV_UNREG);
4257 if (hdev->rfkill) {
4258 rfkill_unregister(hdev->rfkill);
4259 rfkill_destroy(hdev->rfkill);
4262 smp_unregister(hdev);
4264 device_del(&hdev->dev);
4266 debugfs_remove_recursive(hdev->debugfs);
4268 destroy_workqueue(hdev->workqueue);
4269 destroy_workqueue(hdev->req_workqueue);
4271 hci_dev_lock(hdev);
4272 hci_bdaddr_list_clear(&hdev->blacklist);
4273 hci_bdaddr_list_clear(&hdev->whitelist);
4274 hci_uuids_clear(hdev);
4275 hci_link_keys_clear(hdev);
4276 hci_smp_ltks_clear(hdev);
4277 hci_smp_irks_clear(hdev);
4278 hci_remote_oob_data_clear(hdev);
4279 hci_bdaddr_list_clear(&hdev->le_white_list);
4280 hci_conn_params_clear_all(hdev);
4281 hci_discovery_filter_clear(hdev);
4282 hci_dev_unlock(hdev);
4284 hci_dev_put(hdev);
4286 ida_simple_remove(&hci_index_ida, id);
4288 EXPORT_SYMBOL(hci_unregister_dev);
4290 /* Suspend HCI device */
4291 int hci_suspend_dev(struct hci_dev *hdev)
4293 hci_notify(hdev, HCI_DEV_SUSPEND);
4294 return 0;
4296 EXPORT_SYMBOL(hci_suspend_dev);
4298 /* Resume HCI device */
4299 int hci_resume_dev(struct hci_dev *hdev)
4301 hci_notify(hdev, HCI_DEV_RESUME);
4302 return 0;
4304 EXPORT_SYMBOL(hci_resume_dev);
4306 /* Reset HCI device */
4307 int hci_reset_dev(struct hci_dev *hdev)
4309 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4310 struct sk_buff *skb;
4312 skb = bt_skb_alloc(3, GFP_ATOMIC);
4313 if (!skb)
4314 return -ENOMEM;
4316 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4317 memcpy(skb_put(skb, 3), hw_err, 3);
4319 /* Send Hardware Error to upper stack */
4320 return hci_recv_frame(hdev, skb);
4322 EXPORT_SYMBOL(hci_reset_dev);
4324 /* Receive frame from HCI drivers */
4325 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4327 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4328 && !test_bit(HCI_INIT, &hdev->flags))) {
4329 kfree_skb(skb);
4330 return -ENXIO;
4333 /* Incoming skb */
4334 bt_cb(skb)->incoming = 1;
4336 /* Time stamp */
4337 __net_timestamp(skb);
4339 skb_queue_tail(&hdev->rx_q, skb);
4340 queue_work(hdev->workqueue, &hdev->rx_work);
4342 return 0;
4344 EXPORT_SYMBOL(hci_recv_frame);
4346 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4347 int count, __u8 index)
4349 int len = 0;
4350 int hlen = 0;
4351 int remain = count;
4352 struct sk_buff *skb;
4353 struct bt_skb_cb *scb;
4355 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4356 index >= NUM_REASSEMBLY)
4357 return -EILSEQ;
4359 skb = hdev->reassembly[index];
4361 if (!skb) {
4362 switch (type) {
4363 case HCI_ACLDATA_PKT:
4364 len = HCI_MAX_FRAME_SIZE;
4365 hlen = HCI_ACL_HDR_SIZE;
4366 break;
4367 case HCI_EVENT_PKT:
4368 len = HCI_MAX_EVENT_SIZE;
4369 hlen = HCI_EVENT_HDR_SIZE;
4370 break;
4371 case HCI_SCODATA_PKT:
4372 len = HCI_MAX_SCO_SIZE;
4373 hlen = HCI_SCO_HDR_SIZE;
4374 break;
4377 skb = bt_skb_alloc(len, GFP_ATOMIC);
4378 if (!skb)
4379 return -ENOMEM;
4381 scb = (void *) skb->cb;
4382 scb->expect = hlen;
4383 scb->pkt_type = type;
4385 hdev->reassembly[index] = skb;
4388 while (count) {
4389 scb = (void *) skb->cb;
4390 len = min_t(uint, scb->expect, count);
4392 memcpy(skb_put(skb, len), data, len);
4394 count -= len;
4395 data += len;
4396 scb->expect -= len;
4397 remain = count;
4399 switch (type) {
4400 case HCI_EVENT_PKT:
4401 if (skb->len == HCI_EVENT_HDR_SIZE) {
4402 struct hci_event_hdr *h = hci_event_hdr(skb);
4403 scb->expect = h->plen;
4405 if (skb_tailroom(skb) < scb->expect) {
4406 kfree_skb(skb);
4407 hdev->reassembly[index] = NULL;
4408 return -ENOMEM;
4411 break;
4413 case HCI_ACLDATA_PKT:
4414 if (skb->len == HCI_ACL_HDR_SIZE) {
4415 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4416 scb->expect = __le16_to_cpu(h->dlen);
4418 if (skb_tailroom(skb) < scb->expect) {
4419 kfree_skb(skb);
4420 hdev->reassembly[index] = NULL;
4421 return -ENOMEM;
4424 break;
4426 case HCI_SCODATA_PKT:
4427 if (skb->len == HCI_SCO_HDR_SIZE) {
4428 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4429 scb->expect = h->dlen;
4431 if (skb_tailroom(skb) < scb->expect) {
4432 kfree_skb(skb);
4433 hdev->reassembly[index] = NULL;
4434 return -ENOMEM;
4437 break;
4440 if (scb->expect == 0) {
4441 /* Complete frame */
4443 bt_cb(skb)->pkt_type = type;
4444 hci_recv_frame(hdev, skb);
4446 hdev->reassembly[index] = NULL;
4447 return remain;
4451 return remain;
4454 #define STREAM_REASSEMBLY 0
4456 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4458 int type;
4459 int rem = 0;
4461 while (count) {
4462 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4464 if (!skb) {
4465 struct { char type; } *pkt;
4467 /* Start of the frame */
4468 pkt = data;
4469 type = pkt->type;
4471 data++;
4472 count--;
4473 } else
4474 type = bt_cb(skb)->pkt_type;
4476 rem = hci_reassembly(hdev, type, data, count,
4477 STREAM_REASSEMBLY);
4478 if (rem < 0)
4479 return rem;
4481 data += (count - rem);
4482 count = rem;
4485 return rem;
4487 EXPORT_SYMBOL(hci_recv_stream_fragment);
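/* A minimal sketch of a UART-style driver feeding raw transport bytes to
 * the core via the stream reassembly helper above (illustrative; "buffer"
 * and "count" are whatever the transport handed over in one chunk):
 *
 *	ret = hci_recv_stream_fragment(hdev, buffer, count);
 *	if (ret < 0)
 *		BT_ERR("Frame reassembly failed (%d)", ret);
 *
 * Complete frames are passed to hci_recv_frame() internally, so the driver
 * does not need to track packet boundaries itself.
 */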
4489 /* ---- Interface to upper protocols ---- */
4491 int hci_register_cb(struct hci_cb *cb)
4493 BT_DBG("%p name %s", cb, cb->name);
4495 write_lock(&hci_cb_list_lock);
4496 list_add(&cb->list, &hci_cb_list);
4497 write_unlock(&hci_cb_list_lock);
4499 return 0;
4501 EXPORT_SYMBOL(hci_register_cb);
4503 int hci_unregister_cb(struct hci_cb *cb)
4505 BT_DBG("%p name %s", cb, cb->name);
4507 write_lock(&hci_cb_list_lock);
4508 list_del(&cb->list);
4509 write_unlock(&hci_cb_list_lock);
4511 return 0;
4513 EXPORT_SYMBOL(hci_unregister_cb);
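/* Hand one packet to the driver: timestamp it, mirror it to the monitor
 * channel (and to raw sockets when in promiscuous mode), then pass it to
 * hdev->send(). The skb is consumed in either the success or error path.
 */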
4515 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4517 int err;
4519 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4521 /* Time stamp */
4522 __net_timestamp(skb);
4524 /* Send copy to monitor */
4525 hci_send_to_monitor(hdev, skb);
4527 if (atomic_read(&hdev->promisc)) {
4528 /* Send copy to the sockets */
4529 hci_send_to_sock(hdev, skb);
4532 /* Get rid of skb owner, prior to sending to the driver. */
4533 skb_orphan(skb);
4535 err = hdev->send(hdev, skb);
4536 if (err < 0) {
4537 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4538 kfree_skb(skb);
4542 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4544 skb_queue_head_init(&req->cmd_q);
4545 req->hdev = hdev;
4546 req->err = 0;
4549 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4551 struct hci_dev *hdev = req->hdev;
4552 struct sk_buff *skb;
4553 unsigned long flags;
4555 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4557 /* If an error occurred during request building, remove all HCI
4558 * commands queued on the HCI request queue.
4560 if (req->err) {
4561 skb_queue_purge(&req->cmd_q);
4562 return req->err;
4565 /* Do not allow empty requests */
4566 if (skb_queue_empty(&req->cmd_q))
4567 return -ENODATA;
4569 skb = skb_peek_tail(&req->cmd_q);
4570 bt_cb(skb)->req.complete = complete;
4572 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4573 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4574 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4576 queue_work(hdev->workqueue, &hdev->cmd_work);
4578 return 0;
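/* Illustrative usage of the request API above (not part of the original
 * file); scan_disable_complete stands for a caller-provided
 * hci_req_complete_t callback:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	err = hci_req_run(&req, scan_disable_complete);
 *	if (err)
 *		BT_ERR("Failed to run HCI request: err %d", err);
 */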
4581 bool hci_req_pending(struct hci_dev *hdev)
4583 return (hdev->req_status == HCI_REQ_PEND);
4586 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4587 u32 plen, const void *param)
4589 int len = HCI_COMMAND_HDR_SIZE + plen;
4590 struct hci_command_hdr *hdr;
4591 struct sk_buff *skb;
4593 skb = bt_skb_alloc(len, GFP_ATOMIC);
4594 if (!skb)
4595 return NULL;
4597 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4598 hdr->opcode = cpu_to_le16(opcode);
4599 hdr->plen = plen;
4601 if (plen)
4602 memcpy(skb_put(skb, plen), param, plen);
4604 BT_DBG("skb len %d", skb->len);
4606 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4607 bt_cb(skb)->opcode = opcode;
4609 return skb;
4612 /* Send HCI command */
4613 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4614 const void *param)
4616 struct sk_buff *skb;
4618 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4620 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4621 if (!skb) {
4622 BT_ERR("%s no memory for command", hdev->name);
4623 return -ENOMEM;
4626 /* Stand-alone HCI commands must be flagged as
4627 * single-command requests.
4629 bt_cb(skb)->req.start = true;
4631 skb_queue_tail(&hdev->cmd_q, skb);
4632 queue_work(hdev->workqueue, &hdev->cmd_work);
4634 return 0;
4637 /* Queue a command to an asynchronous HCI request */
4638 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4639 const void *param, u8 event)
4641 struct hci_dev *hdev = req->hdev;
4642 struct sk_buff *skb;
4644 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4646 /* If an error occurred during request building, there is no point in
4647 * queueing the HCI command. We can simply return.
4649 if (req->err)
4650 return;
4652 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4653 if (!skb) {
4654 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4655 hdev->name, opcode);
4656 req->err = -ENOMEM;
4657 return;
4660 if (skb_queue_empty(&req->cmd_q))
4661 bt_cb(skb)->req.start = true;
4663 bt_cb(skb)->req.event = event;
4665 skb_queue_tail(&req->cmd_q, skb);
4668 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4669 const void *param)
4671 hci_req_add_ev(req, opcode, plen, param, 0);
4674 /* Get data from the previously sent command */
4675 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4677 struct hci_command_hdr *hdr;
4679 if (!hdev->sent_cmd)
4680 return NULL;
4682 hdr = (void *) hdev->sent_cmd->data;
4684 if (hdr->opcode != cpu_to_le16(opcode))
4685 return NULL;
4687 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4689 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4692 /* Send ACL data */
4693 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4695 struct hci_acl_hdr *hdr;
4696 int len = skb->len;
4698 skb_push(skb, HCI_ACL_HDR_SIZE);
4699 skb_reset_transport_header(skb);
4700 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4701 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4702 hdr->dlen = cpu_to_le16(len);
4705 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4706 struct sk_buff *skb, __u16 flags)
4708 struct hci_conn *conn = chan->conn;
4709 struct hci_dev *hdev = conn->hdev;
4710 struct sk_buff *list;
4712 skb->len = skb_headlen(skb);
4713 skb->data_len = 0;
4715 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4717 switch (hdev->dev_type) {
4718 case HCI_BREDR:
4719 hci_add_acl_hdr(skb, conn->handle, flags);
4720 break;
4721 case HCI_AMP:
4722 hci_add_acl_hdr(skb, chan->handle, flags);
4723 break;
4724 default:
4725 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4726 return;
4729 list = skb_shinfo(skb)->frag_list;
4730 if (!list) {
4731 /* Non fragmented */
4732 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4734 skb_queue_tail(queue, skb);
4735 } else {
4736 /* Fragmented */
4737 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4739 skb_shinfo(skb)->frag_list = NULL;
4741 /* Queue all fragments atomically. We need to use spin_lock_bh
4742 * here because with 6LoWPAN links this function can be called
4743 * from softirq context, and taking a normal spin lock there
4744 * could cause deadlocks.
4746 spin_lock_bh(&queue->lock);
4748 __skb_queue_tail(queue, skb);
4750 flags &= ~ACL_START;
4751 flags |= ACL_CONT;
4752 do {
4753 skb = list; list = list->next;
4755 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4756 hci_add_acl_hdr(skb, conn->handle, flags);
4758 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4760 __skb_queue_tail(queue, skb);
4761 } while (list);
4763 spin_unlock_bh(&queue->lock);
4767 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4769 struct hci_dev *hdev = chan->conn->hdev;
4771 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4773 hci_queue_acl(chan, &chan->data_q, skb, flags);
4775 queue_work(hdev->workqueue, &hdev->tx_work);
4778 /* Send SCO data */
4779 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4781 struct hci_dev *hdev = conn->hdev;
4782 struct hci_sco_hdr hdr;
4784 BT_DBG("%s len %d", hdev->name, skb->len);
4786 hdr.handle = cpu_to_le16(conn->handle);
4787 hdr.dlen = skb->len;
4789 skb_push(skb, HCI_SCO_HDR_SIZE);
4790 skb_reset_transport_header(skb);
4791 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4793 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4795 skb_queue_tail(&conn->data_q, skb);
4796 queue_work(hdev->workqueue, &hdev->tx_work);
4799 /* ---- HCI TX task (outgoing data) ---- */
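/* The schedulers below service queued ACL, SCO/eSCO and LE traffic in a
 * round-robin fashion: the connection (or channel) with the smallest
 * 'sent' count is picked and given a quota derived from the controller's
 * free buffer count, so no single link can starve the others.
 */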
4801 /* HCI Connection scheduler */
4802 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4803 int *quote)
4805 struct hci_conn_hash *h = &hdev->conn_hash;
4806 struct hci_conn *conn = NULL, *c;
4807 unsigned int num = 0, min = ~0;
4809 /* We don't have to lock device here. Connections are always
4810 * added and removed with TX task disabled. */
4812 rcu_read_lock();
4814 list_for_each_entry_rcu(c, &h->list, list) {
4815 if (c->type != type || skb_queue_empty(&c->data_q))
4816 continue;
4818 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4819 continue;
4821 num++;
4823 if (c->sent < min) {
4824 min = c->sent;
4825 conn = c;
4828 if (hci_conn_num(hdev, type) == num)
4829 break;
4832 rcu_read_unlock();
4834 if (conn) {
4835 int cnt, q;
4837 switch (conn->type) {
4838 case ACL_LINK:
4839 cnt = hdev->acl_cnt;
4840 break;
4841 case SCO_LINK:
4842 case ESCO_LINK:
4843 cnt = hdev->sco_cnt;
4844 break;
4845 case LE_LINK:
4846 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4847 break;
4848 default:
4849 cnt = 0;
4850 BT_ERR("Unknown link type");
4853 q = cnt / num;
4854 *quote = q ? q : 1;
4855 } else
4856 *quote = 0;
4858 BT_DBG("conn %p quote %d", conn, *quote);
4859 return conn;
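/* Example of the quota computed above (illustrative numbers): with
 * hdev->acl_cnt at 8 free buffers and three eligible ACL connections, the
 * selected connection may send up to 8 / 3 = 2 packets in this pass; the
 * "q ? q : 1" fallback guarantees forward progress even when cnt < num.
 */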
4862 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4864 struct hci_conn_hash *h = &hdev->conn_hash;
4865 struct hci_conn *c;
4867 BT_ERR("%s link tx timeout", hdev->name);
4869 rcu_read_lock();
4871 /* Kill stalled connections */
4872 list_for_each_entry_rcu(c, &h->list, list) {
4873 if (c->type == type && c->sent) {
4874 BT_ERR("%s killing stalled connection %pMR",
4875 hdev->name, &c->dst);
4876 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4880 rcu_read_unlock();
4883 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4884 int *quote)
4886 struct hci_conn_hash *h = &hdev->conn_hash;
4887 struct hci_chan *chan = NULL;
4888 unsigned int num = 0, min = ~0, cur_prio = 0;
4889 struct hci_conn *conn;
4890 int cnt, q, conn_num = 0;
4892 BT_DBG("%s", hdev->name);
4894 rcu_read_lock();
4896 list_for_each_entry_rcu(conn, &h->list, list) {
4897 struct hci_chan *tmp;
4899 if (conn->type != type)
4900 continue;
4902 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4903 continue;
4905 conn_num++;
4907 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4908 struct sk_buff *skb;
4910 if (skb_queue_empty(&tmp->data_q))
4911 continue;
4913 skb = skb_peek(&tmp->data_q);
4914 if (skb->priority < cur_prio)
4915 continue;
4917 if (skb->priority > cur_prio) {
4918 num = 0;
4919 min = ~0;
4920 cur_prio = skb->priority;
4923 num++;
4925 if (conn->sent < min) {
4926 min = conn->sent;
4927 chan = tmp;
4931 if (hci_conn_num(hdev, type) == conn_num)
4932 break;
4935 rcu_read_unlock();
4937 if (!chan)
4938 return NULL;
4940 switch (chan->conn->type) {
4941 case ACL_LINK:
4942 cnt = hdev->acl_cnt;
4943 break;
4944 case AMP_LINK:
4945 cnt = hdev->block_cnt;
4946 break;
4947 case SCO_LINK:
4948 case ESCO_LINK:
4949 cnt = hdev->sco_cnt;
4950 break;
4951 case LE_LINK:
4952 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4953 break;
4954 default:
4955 cnt = 0;
4956 BT_ERR("Unknown link type");
4959 q = cnt / num;
4960 *quote = q ? q : 1;
4961 BT_DBG("chan %p quote %d", chan, *quote);
4962 return chan;
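/* After a scheduling pass, promote the skb at the head of any channel
 * queue that did not get to transmit (chan->sent == 0) to HCI_PRIO_MAX - 1,
 * so starved channels eventually win over higher-priority traffic.
 */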
4965 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4967 struct hci_conn_hash *h = &hdev->conn_hash;
4968 struct hci_conn *conn;
4969 int num = 0;
4971 BT_DBG("%s", hdev->name);
4973 rcu_read_lock();
4975 list_for_each_entry_rcu(conn, &h->list, list) {
4976 struct hci_chan *chan;
4978 if (conn->type != type)
4979 continue;
4981 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4982 continue;
4984 num++;
4986 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4987 struct sk_buff *skb;
4989 if (chan->sent) {
4990 chan->sent = 0;
4991 continue;
4994 if (skb_queue_empty(&chan->data_q))
4995 continue;
4997 skb = skb_peek(&chan->data_q);
4998 if (skb->priority >= HCI_PRIO_MAX - 1)
4999 continue;
5001 skb->priority = HCI_PRIO_MAX - 1;
5003 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
5004 skb->priority);
5007 if (hci_conn_num(hdev, type) == num)
5008 break;
5011 rcu_read_unlock();
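/* Block-based flow control example (illustrative values; the real
 * hdev->block_len is reported by the controller): with a block length of
 * 339 bytes, an ACL frame of 1021 bytes (4 byte header plus 1017 bytes of
 * payload) occupies DIV_ROUND_UP(1017, 339) = 3 controller buffer blocks.
 */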
5015 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
5017 /* Calculate count of blocks used by this packet */
5018 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
5021 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
5023 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5024 /* ACL tx timeout must be longer than maximum
5025 * link supervision timeout (40.9 seconds) */
5026 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5027 HCI_ACL_TX_TIMEOUT))
5028 hci_link_tx_to(hdev, ACL_LINK);
5032 static void hci_sched_acl_pkt(struct hci_dev *hdev)
5034 unsigned int cnt = hdev->acl_cnt;
5035 struct hci_chan *chan;
5036 struct sk_buff *skb;
5037 int quote;
5039 __check_timeout(hdev, cnt);
5041 while (hdev->acl_cnt &&
5042 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
5043 u32 priority = (skb_peek(&chan->data_q))->priority;
5044 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5045 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5046 skb->len, skb->priority);
5048 /* Stop if priority has changed */
5049 if (skb->priority < priority)
5050 break;
5052 skb = skb_dequeue(&chan->data_q);
5054 hci_conn_enter_active_mode(chan->conn,
5055 bt_cb(skb)->force_active);
5057 hci_send_frame(hdev, skb);
5058 hdev->acl_last_tx = jiffies;
5060 hdev->acl_cnt--;
5061 chan->sent++;
5062 chan->conn->sent++;
5066 if (cnt != hdev->acl_cnt)
5067 hci_prio_recalculate(hdev, ACL_LINK);
5070 static void hci_sched_acl_blk(struct hci_dev *hdev)
5072 unsigned int cnt = hdev->block_cnt;
5073 struct hci_chan *chan;
5074 struct sk_buff *skb;
5075 int quote;
5076 u8 type;
5078 __check_timeout(hdev, cnt);
5080 BT_DBG("%s", hdev->name);
5082 if (hdev->dev_type == HCI_AMP)
5083 type = AMP_LINK;
5084 else
5085 type = ACL_LINK;
5087 while (hdev->block_cnt > 0 &&
5088 (chan = hci_chan_sent(hdev, type, &quote))) {
5089 u32 priority = (skb_peek(&chan->data_q))->priority;
5090 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5091 int blocks;
5093 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5094 skb->len, skb->priority);
5096 /* Stop if priority has changed */
5097 if (skb->priority < priority)
5098 break;
5100 skb = skb_dequeue(&chan->data_q);
5102 blocks = __get_blocks(hdev, skb);
5103 if (blocks > hdev->block_cnt)
5104 return;
5106 hci_conn_enter_active_mode(chan->conn,
5107 bt_cb(skb)->force_active);
5109 hci_send_frame(hdev, skb);
5110 hdev->acl_last_tx = jiffies;
5112 hdev->block_cnt -= blocks;
5113 quote -= blocks;
5115 chan->sent += blocks;
5116 chan->conn->sent += blocks;
5120 if (cnt != hdev->block_cnt)
5121 hci_prio_recalculate(hdev, type);
5124 static void hci_sched_acl(struct hci_dev *hdev)
5126 BT_DBG("%s", hdev->name);
5128 /* No ACL link over BR/EDR controller */
5129 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5130 return;
5132 /* No AMP link over AMP controller */
5133 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5134 return;
5136 switch (hdev->flow_ctl_mode) {
5137 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5138 hci_sched_acl_pkt(hdev);
5139 break;
5141 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5142 hci_sched_acl_blk(hdev);
5143 break;
5147 /* Schedule SCO */
5148 static void hci_sched_sco(struct hci_dev *hdev)
5150 struct hci_conn *conn;
5151 struct sk_buff *skb;
5152 int quote;
5154 BT_DBG("%s", hdev->name);
5156 if (!hci_conn_num(hdev, SCO_LINK))
5157 return;
5159 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5160 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5161 BT_DBG("skb %p len %d", skb, skb->len);
5162 hci_send_frame(hdev, skb);
5164 conn->sent++;
5165 if (conn->sent == ~0)
5166 conn->sent = 0;
5171 static void hci_sched_esco(struct hci_dev *hdev)
5173 struct hci_conn *conn;
5174 struct sk_buff *skb;
5175 int quote;
5177 BT_DBG("%s", hdev->name);
5179 if (!hci_conn_num(hdev, ESCO_LINK))
5180 return;
5182 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5183 &quote))) {
5184 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5185 BT_DBG("skb %p len %d", skb, skb->len);
5186 hci_send_frame(hdev, skb);
5188 conn->sent++;
5189 if (conn->sent == ~0)
5190 conn->sent = 0;
5195 static void hci_sched_le(struct hci_dev *hdev)
5197 struct hci_chan *chan;
5198 struct sk_buff *skb;
5199 int quote, cnt, tmp;
5201 BT_DBG("%s", hdev->name);
5203 if (!hci_conn_num(hdev, LE_LINK))
5204 return;
5206 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5207 /* LE tx timeout must be longer than maximum
5208 * link supervision timeout (40.9 seconds) */
5209 if (!hdev->le_cnt && hdev->le_pkts &&
5210 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5211 hci_link_tx_to(hdev, LE_LINK);
5214 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5215 tmp = cnt;
5216 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5217 u32 priority = (skb_peek(&chan->data_q))->priority;
5218 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5219 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5220 skb->len, skb->priority);
5222 /* Stop if priority has changed */
5223 if (skb->priority < priority)
5224 break;
5226 skb = skb_dequeue(&chan->data_q);
5228 hci_send_frame(hdev, skb);
5229 hdev->le_last_tx = jiffies;
5231 cnt--;
5232 chan->sent++;
5233 chan->conn->sent++;
5237 if (hdev->le_pkts)
5238 hdev->le_cnt = cnt;
5239 else
5240 hdev->acl_cnt = cnt;
5242 if (cnt != tmp)
5243 hci_prio_recalculate(hdev, LE_LINK);
5246 static void hci_tx_work(struct work_struct *work)
5248 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5249 struct sk_buff *skb;
5251 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5252 hdev->sco_cnt, hdev->le_cnt);
5254 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5255 /* Schedule queues and send stuff to HCI driver */
5256 hci_sched_acl(hdev);
5257 hci_sched_sco(hdev);
5258 hci_sched_esco(hdev);
5259 hci_sched_le(hdev);
5262 /* Send next queued raw (unknown type) packet */
5263 while ((skb = skb_dequeue(&hdev->raw_q)))
5264 hci_send_frame(hdev, skb);
5267 /* ----- HCI RX task (incoming data processing) ----- */
5269 /* ACL data packet */
5270 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5272 struct hci_acl_hdr *hdr = (void *) skb->data;
5273 struct hci_conn *conn;
5274 __u16 handle, flags;
5276 skb_pull(skb, HCI_ACL_HDR_SIZE);
5278 handle = __le16_to_cpu(hdr->handle);
5279 flags = hci_flags(handle);
5280 handle = hci_handle(handle);
5282 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5283 handle, flags);
5285 hdev->stat.acl_rx++;
5287 hci_dev_lock(hdev);
5288 conn = hci_conn_hash_lookup_handle(hdev, handle);
5289 hci_dev_unlock(hdev);
5291 if (conn) {
5292 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5294 /* Send to upper protocol */
5295 l2cap_recv_acldata(conn, skb, flags);
5296 return;
5297 } else {
5298 BT_ERR("%s ACL packet for unknown connection handle %d",
5299 hdev->name, handle);
5302 kfree_skb(skb);
5305 /* SCO data packet */
5306 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5308 struct hci_sco_hdr *hdr = (void *) skb->data;
5309 struct hci_conn *conn;
5310 __u16 handle;
5312 skb_pull(skb, HCI_SCO_HDR_SIZE);
5314 handle = __le16_to_cpu(hdr->handle);
5316 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5318 hdev->stat.sco_rx++;
5320 hci_dev_lock(hdev);
5321 conn = hci_conn_hash_lookup_handle(hdev, handle);
5322 hci_dev_unlock(hdev);
5324 if (conn) {
5325 /* Send to upper protocol */
5326 sco_recv_scodata(conn, skb);
5327 return;
5328 } else {
5329 BT_ERR("%s SCO packet for unknown connection handle %d",
5330 hdev->name, handle);
5333 kfree_skb(skb);
5336 static bool hci_req_is_complete(struct hci_dev *hdev)
5338 struct sk_buff *skb;
5340 skb = skb_peek(&hdev->cmd_q);
5341 if (!skb)
5342 return true;
5344 return bt_cb(skb)->req.start;
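/* Re-queue a clone of the last sent command at the head of the command
 * queue, except for HCI_OP_RESET which is never retried. This is used to
 * recover from controllers that reset spontaneously during init (see the
 * comment in hci_req_cmd_complete below).
 */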
5347 static void hci_resend_last(struct hci_dev *hdev)
5349 struct hci_command_hdr *sent;
5350 struct sk_buff *skb;
5351 u16 opcode;
5353 if (!hdev->sent_cmd)
5354 return;
5356 sent = (void *) hdev->sent_cmd->data;
5357 opcode = __le16_to_cpu(sent->opcode);
5358 if (opcode == HCI_OP_RESET)
5359 return;
5361 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5362 if (!skb)
5363 return;
5365 skb_queue_head(&hdev->cmd_q, skb);
5366 queue_work(hdev->workqueue, &hdev->cmd_work);
5369 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5371 hci_req_complete_t req_complete = NULL;
5372 struct sk_buff *skb;
5373 unsigned long flags;
5375 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5377 /* If the completed command doesn't match the last one that was
5378 * sent we need to do special handling of it.
5380 if (!hci_sent_cmd_data(hdev, opcode)) {
5381 /* Some CSR based controllers generate a spontaneous
5382 * reset complete event during init and any pending
5383 * command will never be completed. In such a case we
5384 * need to resend whatever was the last sent
5385 * command.
5387 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5388 hci_resend_last(hdev);
5390 return;
5393 /* If the command succeeded and there's still more commands in
5394 * this request the request is not yet complete.
5396 if (!status && !hci_req_is_complete(hdev))
5397 return;
5399 /* If this was the last command in a request the complete
5400 * callback would be found in hdev->sent_cmd instead of the
5401 * command queue (hdev->cmd_q).
5403 if (hdev->sent_cmd) {
5404 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5406 if (req_complete) {
5407 /* We must set the complete callback to NULL to
5408 * avoid calling the callback more than once if
5409 * this function gets called again.
5411 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5413 goto call_complete;
5417 /* Remove all pending commands belonging to this request */
5418 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5419 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5420 if (bt_cb(skb)->req.start) {
5421 __skb_queue_head(&hdev->cmd_q, skb);
5422 break;
5425 req_complete = bt_cb(skb)->req.complete;
5426 kfree_skb(skb);
5428 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5430 call_complete:
5431 if (req_complete)
5432 req_complete(hdev, status);
5435 static void hci_rx_work(struct work_struct *work)
5437 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5438 struct sk_buff *skb;
5440 BT_DBG("%s", hdev->name);
5442 while ((skb = skb_dequeue(&hdev->rx_q))) {
5443 /* Send copy to monitor */
5444 hci_send_to_monitor(hdev, skb);
5446 if (atomic_read(&hdev->promisc)) {
5447 /* Send copy to the sockets */
5448 hci_send_to_sock(hdev, skb);
5451 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5452 kfree_skb(skb);
5453 continue;
5456 if (test_bit(HCI_INIT, &hdev->flags)) {
5457 /* Don't process data packets in this state. */
5458 switch (bt_cb(skb)->pkt_type) {
5459 case HCI_ACLDATA_PKT:
5460 case HCI_SCODATA_PKT:
5461 kfree_skb(skb);
5462 continue;
5466 /* Process frame */
5467 switch (bt_cb(skb)->pkt_type) {
5468 case HCI_EVENT_PKT:
5469 BT_DBG("%s Event packet", hdev->name);
5470 hci_event_packet(hdev, skb);
5471 break;
5473 case HCI_ACLDATA_PKT:
5474 BT_DBG("%s ACL data packet", hdev->name);
5475 hci_acldata_packet(hdev, skb);
5476 break;
5478 case HCI_SCODATA_PKT:
5479 BT_DBG("%s SCO data packet", hdev->name);
5480 hci_scodata_packet(hdev, skb);
5481 break;
5483 default:
5484 kfree_skb(skb);
5485 break;
5490 static void hci_cmd_work(struct work_struct *work)
5492 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5493 struct sk_buff *skb;
5495 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5496 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5498 /* Send queued commands */
5499 if (atomic_read(&hdev->cmd_cnt)) {
5500 skb = skb_dequeue(&hdev->cmd_q);
5501 if (!skb)
5502 return;
5504 kfree_skb(hdev->sent_cmd);
5506 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5507 if (hdev->sent_cmd) {
5508 atomic_dec(&hdev->cmd_cnt);
5509 hci_send_frame(hdev, skb);
5510 if (test_bit(HCI_RESET, &hdev->flags))
5511 cancel_delayed_work(&hdev->cmd_timer);
5512 else
5513 schedule_delayed_work(&hdev->cmd_timer,
5514 HCI_CMD_TIMEOUT);
5515 } else {
5516 skb_queue_head(&hdev->cmd_q, skb);
5517 queue_work(hdev->workqueue, &hdev->cmd_work);
5522 void hci_req_add_le_scan_disable(struct hci_request *req)
5524 struct hci_cp_le_set_scan_enable cp;
5526 memset(&cp, 0, sizeof(cp));
5527 cp.enable = LE_SCAN_DISABLE;
5528 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5531 static void add_to_white_list(struct hci_request *req,
5532 struct hci_conn_params *params)
5534 struct hci_cp_le_add_to_white_list cp;
5536 cp.bdaddr_type = params->addr_type;
5537 bacpy(&cp.bdaddr, &params->addr);
5539 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5542 static u8 update_white_list(struct hci_request *req)
5544 struct hci_dev *hdev = req->hdev;
5545 struct hci_conn_params *params;
5546 struct bdaddr_list *b;
5547 uint8_t white_list_entries = 0;
5549 /* Go through the current white list programmed into the
5550 * controller one by one and check if that address is still
5551 * in the list of pending connections or list of devices to
5552 * report. If not present in either list, then queue the
5553 * command to remove it from the controller.
5555 list_for_each_entry(b, &hdev->le_white_list, list) {
5556 struct hci_cp_le_del_from_white_list cp;
5558 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5559 &b->bdaddr, b->bdaddr_type) ||
5560 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5561 &b->bdaddr, b->bdaddr_type)) {
5562 white_list_entries++;
5563 continue;
5566 cp.bdaddr_type = b->bdaddr_type;
5567 bacpy(&cp.bdaddr, &b->bdaddr);
5569 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5570 sizeof(cp), &cp);
5573 /* Since all no longer valid white list entries have been
5574 * removed, walk through the list of pending connections
5575 * and ensure that any new device gets programmed into
5576 * the controller.
5578 * If the list of the devices is larger than the list of
5579 * available white list entries in the controller, then
5580 * just abort and return the filter policy value to not use the
5581 * white list.
5583 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5584 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5585 &params->addr, params->addr_type))
5586 continue;
5588 if (white_list_entries >= hdev->le_white_list_size) {
5589 /* Select filter policy to accept all advertising */
5590 return 0x00;
5593 if (hci_find_irk_by_addr(hdev, &params->addr,
5594 params->addr_type)) {
5595 /* White list can not be used with RPAs */
5596 return 0x00;
5599 white_list_entries++;
5600 add_to_white_list(req, params);
5603 /* After adding all new pending connections, walk through
5604 * the list of pending reports and also add these to the
5605 * white list if there is still space.
5607 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5608 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5609 &params->addr, params->addr_type))
5610 continue;
5612 if (white_list_entries >= hdev->le_white_list_size) {
5613 /* Select filter policy to accept all advertising */
5614 return 0x00;
5617 if (hci_find_irk_by_addr(hdev, &params->addr,
5618 params->addr_type)) {
5619 /* White list can not be used with RPAs */
5620 return 0x00;
5623 white_list_entries++;
5624 add_to_white_list(req, params);
5627 /* Select filter policy to use white list */
5628 return 0x01;
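/* The value returned above becomes the filter_policy of the LE scan
 * parameters: 0x00 accepts all advertising (white list unusable or full),
 * while 0x01 only accepts advertising from devices on the white list.
 */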
5631 void hci_req_add_le_passive_scan(struct hci_request *req)
5633 struct hci_cp_le_set_scan_param param_cp;
5634 struct hci_cp_le_set_scan_enable enable_cp;
5635 struct hci_dev *hdev = req->hdev;
5636 u8 own_addr_type;
5637 u8 filter_policy;
5639 /* Set require_privacy to false since no SCAN_REQ is sent
5640 * during passive scanning. Not using a non-resolvable address
5641 * here is important so that peer devices using direct
5642 * advertising with our address will be correctly reported
5643 * by the controller.
5645 if (hci_update_random_address(req, false, &own_addr_type))
5646 return;
5648 /* Adding or removing entries from the white list must
5649 * happen before enabling scanning. The controller does
5650 * not allow white list modification while scanning.
5652 filter_policy = update_white_list(req);
5654 /* When the controller is using random resolvable addresses and
5655 * LE privacy is enabled, controllers that support the Extended
5656 * Scanner Filter Policies feature can additionally handle
5657 * directed advertising.
5659 * So instead of using filter policies 0x00 (no whitelist)
5660 * and 0x01 (whitelist enabled) use the new filter policies
5661 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
5663 if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
5664 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
5665 filter_policy |= 0x02;
5667 memset(&param_cp, 0, sizeof(param_cp));
5668 param_cp.type = LE_SCAN_PASSIVE;
5669 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5670 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5671 param_cp.own_address_type = own_addr_type;
5672 param_cp.filter_policy = filter_policy;
5673 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5674 &param_cp);
5676 memset(&enable_cp, 0, sizeof(enable_cp));
5677 enable_cp.enable = LE_SCAN_ENABLE;
5678 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5679 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5680 &enable_cp);
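/* The interval and window above are in units of 0.625 ms. As an
 * illustrative example, a le_scan_interval of 0x0060 and le_scan_window of
 * 0x0030 make the controller listen for 30 ms out of every 60 ms while
 * passive scanning is enabled.
 */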
5683 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5685 if (status)
5686 BT_DBG("HCI request failed to update background scanning: "
5687 "status 0x%2.2x", status);
5690 /* This function controls the background scanning based on hdev->pend_le_conns
5691 * list. If there are pending LE connection we start the background scanning,
5692 * otherwise we stop it.
5694 * This function requires the caller holds hdev->lock.
5696 void hci_update_background_scan(struct hci_dev *hdev)
5698 struct hci_request req;
5699 struct hci_conn *conn;
5700 int err;
5702 if (!test_bit(HCI_UP, &hdev->flags) ||
5703 test_bit(HCI_INIT, &hdev->flags) ||
5704 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5705 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5706 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5707 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5708 return;
5710 /* No point in doing scanning if LE support hasn't been enabled */
5711 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5712 return;
5714 /* If discovery is active don't interfere with it */
5715 if (hdev->discovery.state != DISCOVERY_STOPPED)
5716 return;
5718 /* Reset RSSI and UUID filters when starting background scanning
5719 * since these filters are meant for service discovery only.
5721 * The Start Discovery and Start Service Discovery operations
5722 * always set proper values for the RSSI threshold and UUID
5723 * filter list. So it is safe to just reset them here.
5725 hci_discovery_filter_clear(hdev);
5727 hci_req_init(&req, hdev);
5729 if (list_empty(&hdev->pend_le_conns) &&
5730 list_empty(&hdev->pend_le_reports)) {
5731 /* If there are no pending LE connections or devices
5732 * to be scanned for, we should stop the background
5733 * scanning.
5736 /* If controller is not scanning we are done. */
5737 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5738 return;
5740 hci_req_add_le_scan_disable(&req);
5742 BT_DBG("%s stopping background scanning", hdev->name);
5743 } else {
5744 /* If there is at least one pending LE connection, we should
5745 * keep the background scan running.
5748 /* If controller is connecting, we should not start scanning
5749 * since some controllers are not able to scan and connect at
5750 * the same time.
5752 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5753 if (conn)
5754 return;
5756 /* If controller is currently scanning, we stop it to ensure we
5757 * don't miss any advertising (due to duplicates filter).
5759 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5760 hci_req_add_le_scan_disable(&req);
5762 hci_req_add_le_passive_scan(&req);
5764 BT_DBG("%s starting background scanning", hdev->name);
5767 err = hci_req_run(&req, update_background_scan_complete);
5768 if (err)
5769 BT_ERR("Failed to run HCI request: err %d", err);
5772 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5774 struct bdaddr_list *b;
5776 list_for_each_entry(b, &hdev->whitelist, list) {
5777 struct hci_conn *conn;
5779 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5780 if (!conn)
5781 return true;
5783 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5784 return true;
5787 return false;
5790 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5792 u8 scan;
5794 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5795 return;
5797 if (!hdev_is_powered(hdev))
5798 return;
5800 if (mgmt_powering_down(hdev))
5801 return;
5803 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5804 disconnected_whitelist_entries(hdev))
5805 scan = SCAN_PAGE;
5806 else
5807 scan = SCAN_DISABLED;
5809 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5810 return;
5812 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5813 scan |= SCAN_INQUIRY;
5815 if (req)
5816 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5817 else
5818 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);