drm/i915: move check of intel_crtc_cursor_set_obj() out
[linux-2.6/btrfs-unstable.git] / net / bluetooth / hci_core.c
blob1d9c29a00568d9b6ac3eea93848ec6637e5a5a5d
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "smp.h"
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ----- HCI requests ----- */
59 #define HCI_REQ_DONE 0
60 #define HCI_REQ_PEND 1
61 #define HCI_REQ_CANCELED 2
63 #define hci_req_lock(d) mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
68 static void hci_notify(struct hci_dev *hdev, int event)
70 hci_sock_dev_event(hdev, event);
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
82 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108 return -EALREADY;
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
125 if (err < 0)
126 return err;
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
130 return count;
133 static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
143 u8 p;
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
163 return 0;
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
188 return 0;
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
203 static int whitelist_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
208 hci_dev_lock(hdev);
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
213 return 0;
216 static int whitelist_open(struct inode *inode, struct file *file)
218 return single_open(file, whitelist_show, inode->i_private);
221 static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
228 static int uuids_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
233 hci_dev_lock(hdev);
234 list_for_each_entry(uuid, &hdev->uuids, list) {
235 u8 i, val[16];
237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
244 seq_printf(f, "%pUb\n", val);
246 hci_dev_unlock(hdev);
248 return 0;
251 static int uuids_open(struct inode *inode, struct file *file)
253 return single_open(file, uuids_show, inode->i_private);
256 static const struct file_operations uuids_fops = {
257 .open = uuids_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
263 static int inquiry_cache_show(struct seq_file *f, void *p)
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
269 hci_dev_lock(hdev);
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
274 &data->bdaddr,
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
282 hci_dev_unlock(hdev);
284 return 0;
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
289 return single_open(file, inquiry_cache_show, inode->i_private);
292 static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
294 .read = seq_read,
295 .llseek = seq_lseek,
296 .release = single_release,
299 static int link_keys_show(struct seq_file *f, void *ptr)
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
304 hci_dev_lock(hdev);
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
310 hci_dev_unlock(hdev);
312 return 0;
315 static int link_keys_open(struct inode *inode, struct file *file)
317 return single_open(file, link_keys_show, inode->i_private);
320 static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
322 .read = seq_read,
323 .llseek = seq_lseek,
324 .release = single_release,
327 static int dev_class_show(struct seq_file *f, void *ptr)
329 struct hci_dev *hdev = f->private;
331 hci_dev_lock(hdev);
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
336 return 0;
339 static int dev_class_open(struct inode *inode, struct file *file)
341 return single_open(file, dev_class_show, inode->i_private);
344 static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
346 .read = seq_read,
347 .llseek = seq_lseek,
348 .release = single_release,
351 static int voice_setting_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
355 hci_dev_lock(hdev);
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
359 return 0;
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
365 static int auto_accept_delay_set(void *data, u64 val)
367 struct hci_dev *hdev = data;
369 hci_dev_lock(hdev);
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
373 return 0;
376 static int auto_accept_delay_get(void *data, u64 *val)
378 struct hci_dev *hdev = data;
380 hci_dev_lock(hdev);
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
384 return 0;
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
393 struct hci_dev *hdev = file->private_data;
394 char buf[3];
396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
397 buf[1] = '\n';
398 buf[2] = '\0';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
402 static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
407 char buf[32];
408 size_t buf_size = min(count, (sizeof(buf)-1));
409 bool enable;
411 if (test_bit(HCI_UP, &hdev->flags))
412 return -EBUSY;
414 if (copy_from_user(buf, user_buf, buf_size))
415 return -EFAULT;
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
419 return -EINVAL;
421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
422 return -EALREADY;
424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
426 return count;
429 static const struct file_operations force_sc_support_fops = {
430 .open = simple_open,
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
439 struct hci_dev *hdev = file->private_data;
440 char buf[3];
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443 buf[1] = '\n';
444 buf[2] = '\0';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
448 static const struct file_operations sc_only_mode_fops = {
449 .open = simple_open,
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
454 static int idle_timeout_set(void *data, u64 val)
456 struct hci_dev *hdev = data;
458 if (val != 0 && (val < 500 || val > 3600000))
459 return -EINVAL;
461 hci_dev_lock(hdev);
462 hdev->idle_timeout = val;
463 hci_dev_unlock(hdev);
465 return 0;
468 static int idle_timeout_get(void *data, u64 *val)
470 struct hci_dev *hdev = data;
472 hci_dev_lock(hdev);
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
476 return 0;
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
482 static int rpa_timeout_set(void *data, u64 val)
484 struct hci_dev *hdev = data;
486 /* Require the RPA timeout to be at least 30 seconds and at most
487 * 24 hours.
489 if (val < 30 || val > (60 * 60 * 24))
490 return -EINVAL;
492 hci_dev_lock(hdev);
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
496 return 0;
499 static int rpa_timeout_get(void *data, u64 *val)
501 struct hci_dev *hdev = data;
503 hci_dev_lock(hdev);
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
507 return 0;
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
513 static int sniff_min_interval_set(void *data, u64 val)
515 struct hci_dev *hdev = data;
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
518 return -EINVAL;
520 hci_dev_lock(hdev);
521 hdev->sniff_min_interval = val;
522 hci_dev_unlock(hdev);
524 return 0;
527 static int sniff_min_interval_get(void *data, u64 *val)
529 struct hci_dev *hdev = data;
531 hci_dev_lock(hdev);
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
535 return 0;
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
541 static int sniff_max_interval_set(void *data, u64 val)
543 struct hci_dev *hdev = data;
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
546 return -EINVAL;
548 hci_dev_lock(hdev);
549 hdev->sniff_max_interval = val;
550 hci_dev_unlock(hdev);
552 return 0;
555 static int sniff_max_interval_get(void *data, u64 *val)
557 struct hci_dev *hdev = data;
559 hci_dev_lock(hdev);
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
563 return 0;
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
569 static int conn_info_min_age_set(void *data, u64 val)
571 struct hci_dev *hdev = data;
573 if (val == 0 || val > hdev->conn_info_max_age)
574 return -EINVAL;
576 hci_dev_lock(hdev);
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
580 return 0;
583 static int conn_info_min_age_get(void *data, u64 *val)
585 struct hci_dev *hdev = data;
587 hci_dev_lock(hdev);
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
591 return 0;
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
597 static int conn_info_max_age_set(void *data, u64 val)
599 struct hci_dev *hdev = data;
601 if (val == 0 || val < hdev->conn_info_min_age)
602 return -EINVAL;
604 hci_dev_lock(hdev);
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
608 return 0;
611 static int conn_info_max_age_get(void *data, u64 *val)
613 struct hci_dev *hdev = data;
615 hci_dev_lock(hdev);
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
619 return 0;
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
625 static int identity_show(struct seq_file *f, void *p)
627 struct hci_dev *hdev = f->private;
628 bdaddr_t addr;
629 u8 addr_type;
631 hci_dev_lock(hdev);
633 hci_copy_identity_address(hdev, &addr, &addr_type);
635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 16, hdev->irk, &hdev->rpa);
638 hci_dev_unlock(hdev);
640 return 0;
643 static int identity_open(struct inode *inode, struct file *file)
645 return single_open(file, identity_show, inode->i_private);
648 static const struct file_operations identity_fops = {
649 .open = identity_open,
650 .read = seq_read,
651 .llseek = seq_lseek,
652 .release = single_release,
655 static int random_address_show(struct seq_file *f, void *p)
657 struct hci_dev *hdev = f->private;
659 hci_dev_lock(hdev);
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
663 return 0;
666 static int random_address_open(struct inode *inode, struct file *file)
668 return single_open(file, random_address_show, inode->i_private);
671 static const struct file_operations random_address_fops = {
672 .open = random_address_open,
673 .read = seq_read,
674 .llseek = seq_lseek,
675 .release = single_release,
678 static int static_address_show(struct seq_file *f, void *p)
680 struct hci_dev *hdev = f->private;
682 hci_dev_lock(hdev);
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
686 return 0;
689 static int static_address_open(struct inode *inode, struct file *file)
691 return single_open(file, static_address_show, inode->i_private);
694 static const struct file_operations static_address_fops = {
695 .open = static_address_open,
696 .read = seq_read,
697 .llseek = seq_lseek,
698 .release = single_release,
701 static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
705 struct hci_dev *hdev = file->private_data;
706 char buf[3];
708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
709 buf[1] = '\n';
710 buf[2] = '\0';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
714 static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
718 struct hci_dev *hdev = file->private_data;
719 char buf[32];
720 size_t buf_size = min(count, (sizeof(buf)-1));
721 bool enable;
723 if (test_bit(HCI_UP, &hdev->flags))
724 return -EBUSY;
726 if (copy_from_user(buf, user_buf, buf_size))
727 return -EFAULT;
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
731 return -EINVAL;
733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
734 return -EALREADY;
736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
738 return count;
741 static const struct file_operations force_static_address_fops = {
742 .open = simple_open,
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
748 static int white_list_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
753 hci_dev_lock(hdev);
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
758 return 0;
761 static int white_list_open(struct inode *inode, struct file *file)
763 return single_open(file, white_list_show, inode->i_private);
766 static const struct file_operations white_list_fops = {
767 .open = white_list_open,
768 .read = seq_read,
769 .llseek = seq_lseek,
770 .release = single_release,
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
778 hci_dev_lock(hdev);
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
785 hci_dev_unlock(hdev);
787 return 0;
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
792 return single_open(file, identity_resolving_keys_show,
793 inode->i_private);
796 static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
798 .read = seq_read,
799 .llseek = seq_lseek,
800 .release = single_release,
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
808 hci_dev_lock(hdev);
809 list_for_each_safe(p, n, &hdev->long_term_keys) {
810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 __le64_to_cpu(ltk->rand), 16, ltk->val);
816 hci_dev_unlock(hdev);
818 return 0;
821 static int long_term_keys_open(struct inode *inode, struct file *file)
823 return single_open(file, long_term_keys_show, inode->i_private);
826 static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
828 .read = seq_read,
829 .llseek = seq_lseek,
830 .release = single_release,
833 static int conn_min_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
838 return -EINVAL;
840 hci_dev_lock(hdev);
841 hdev->le_conn_min_interval = val;
842 hci_dev_unlock(hdev);
844 return 0;
847 static int conn_min_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
851 hci_dev_lock(hdev);
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
855 return 0;
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
861 static int conn_max_interval_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
866 return -EINVAL;
868 hci_dev_lock(hdev);
869 hdev->le_conn_max_interval = val;
870 hci_dev_unlock(hdev);
872 return 0;
875 static int conn_max_interval_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
879 hci_dev_lock(hdev);
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
883 return 0;
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
889 static int conn_latency_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
893 if (val > 0x01f3)
894 return -EINVAL;
896 hci_dev_lock(hdev);
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
900 return 0;
903 static int conn_latency_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
907 hci_dev_lock(hdev);
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
911 return 0;
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
917 static int supervision_timeout_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x000a || val > 0x0c80)
922 return -EINVAL;
924 hci_dev_lock(hdev);
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
928 return 0;
931 static int supervision_timeout_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
935 hci_dev_lock(hdev);
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
939 return 0;
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
945 static int adv_channel_map_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x01 || val > 0x07)
950 return -EINVAL;
952 hci_dev_lock(hdev);
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
956 return 0;
959 static int adv_channel_map_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
963 hci_dev_lock(hdev);
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
967 return 0;
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
973 static int adv_min_interval_set(void *data, u64 val)
975 struct hci_dev *hdev = data;
977 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
978 return -EINVAL;
980 hci_dev_lock(hdev);
981 hdev->le_adv_min_interval = val;
982 hci_dev_unlock(hdev);
984 return 0;
987 static int adv_min_interval_get(void *data, u64 *val)
989 struct hci_dev *hdev = data;
991 hci_dev_lock(hdev);
992 *val = hdev->le_adv_min_interval;
993 hci_dev_unlock(hdev);
995 return 0;
998 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999 adv_min_interval_set, "%llu\n");
1001 static int adv_max_interval_set(void *data, u64 val)
1003 struct hci_dev *hdev = data;
1005 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1006 return -EINVAL;
1008 hci_dev_lock(hdev);
1009 hdev->le_adv_max_interval = val;
1010 hci_dev_unlock(hdev);
1012 return 0;
1015 static int adv_max_interval_get(void *data, u64 *val)
1017 struct hci_dev *hdev = data;
1019 hci_dev_lock(hdev);
1020 *val = hdev->le_adv_max_interval;
1021 hci_dev_unlock(hdev);
1023 return 0;
1026 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027 adv_max_interval_set, "%llu\n");
1029 static int device_list_show(struct seq_file *f, void *ptr)
1031 struct hci_dev *hdev = f->private;
1032 struct hci_conn_params *p;
1034 hci_dev_lock(hdev);
1035 list_for_each_entry(p, &hdev->le_conn_params, list) {
1036 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1037 p->auto_connect);
1039 hci_dev_unlock(hdev);
1041 return 0;
1044 static int device_list_open(struct inode *inode, struct file *file)
1046 return single_open(file, device_list_show, inode->i_private);
1049 static const struct file_operations device_list_fops = {
1050 .open = device_list_open,
1051 .read = seq_read,
1052 .llseek = seq_lseek,
1053 .release = single_release,
1056 /* ---- HCI requests ---- */
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1060 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1062 if (hdev->req_status == HCI_REQ_PEND) {
1063 hdev->req_result = result;
1064 hdev->req_status = HCI_REQ_DONE;
1065 wake_up_interruptible(&hdev->req_wait_q);
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1071 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1073 if (hdev->req_status == HCI_REQ_PEND) {
1074 hdev->req_result = err;
1075 hdev->req_status = HCI_REQ_CANCELED;
1076 wake_up_interruptible(&hdev->req_wait_q);
1080 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1081 u8 event)
1083 struct hci_ev_cmd_complete *ev;
1084 struct hci_event_hdr *hdr;
1085 struct sk_buff *skb;
1087 hci_dev_lock(hdev);
1089 skb = hdev->recv_evt;
1090 hdev->recv_evt = NULL;
1092 hci_dev_unlock(hdev);
1094 if (!skb)
1095 return ERR_PTR(-ENODATA);
1097 if (skb->len < sizeof(*hdr)) {
1098 BT_ERR("Too short HCI event");
1099 goto failed;
1102 hdr = (void *) skb->data;
1103 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1105 if (event) {
1106 if (hdr->evt != event)
1107 goto failed;
1108 return skb;
1111 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1112 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1113 goto failed;
1116 if (skb->len < sizeof(*ev)) {
1117 BT_ERR("Too short cmd_complete event");
1118 goto failed;
1121 ev = (void *) skb->data;
1122 skb_pull(skb, sizeof(*ev));
1124 if (opcode == __le16_to_cpu(ev->opcode))
1125 return skb;
1127 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1128 __le16_to_cpu(ev->opcode));
1130 failed:
1131 kfree_skb(skb);
1132 return ERR_PTR(-ENODATA);
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136 const void *param, u8 event, u32 timeout)
1138 DECLARE_WAITQUEUE(wait, current);
1139 struct hci_request req;
1140 int err = 0;
1142 BT_DBG("%s", hdev->name);
1144 hci_req_init(&req, hdev);
1146 hci_req_add_ev(&req, opcode, plen, param, event);
1148 hdev->req_status = HCI_REQ_PEND;
1150 err = hci_req_run(&req, hci_req_sync_complete);
1151 if (err < 0)
1152 return ERR_PTR(err);
1154 add_wait_queue(&hdev->req_wait_q, &wait);
1155 set_current_state(TASK_INTERRUPTIBLE);
1157 schedule_timeout(timeout);
1159 remove_wait_queue(&hdev->req_wait_q, &wait);
1161 if (signal_pending(current))
1162 return ERR_PTR(-EINTR);
1164 switch (hdev->req_status) {
1165 case HCI_REQ_DONE:
1166 err = -bt_to_errno(hdev->req_result);
1167 break;
1169 case HCI_REQ_CANCELED:
1170 err = -hdev->req_result;
1171 break;
1173 default:
1174 err = -ETIMEDOUT;
1175 break;
1178 hdev->req_status = hdev->req_result = 0;
1180 BT_DBG("%s end: err %d", hdev->name, err);
1182 if (err < 0)
1183 return ERR_PTR(err);
1185 return hci_get_cmd_complete(hdev, opcode, event);
1187 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1189 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1190 const void *param, u32 timeout)
1192 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1194 EXPORT_SYMBOL(__hci_cmd_sync);
1196 /* Execute request and wait for completion. */
1197 static int __hci_req_sync(struct hci_dev *hdev,
1198 void (*func)(struct hci_request *req,
1199 unsigned long opt),
1200 unsigned long opt, __u32 timeout)
1202 struct hci_request req;
1203 DECLARE_WAITQUEUE(wait, current);
1204 int err = 0;
1206 BT_DBG("%s start", hdev->name);
1208 hci_req_init(&req, hdev);
1210 hdev->req_status = HCI_REQ_PEND;
1212 func(&req, opt);
1214 err = hci_req_run(&req, hci_req_sync_complete);
1215 if (err < 0) {
1216 hdev->req_status = 0;
1218 /* ENODATA means the HCI request command queue is empty.
1219 * This can happen when a request with conditionals doesn't
1220 * trigger any commands to be sent. This is normal behavior
1221 * and should not trigger an error return.
1223 if (err == -ENODATA)
1224 return 0;
1226 return err;
1229 add_wait_queue(&hdev->req_wait_q, &wait);
1230 set_current_state(TASK_INTERRUPTIBLE);
1232 schedule_timeout(timeout);
1234 remove_wait_queue(&hdev->req_wait_q, &wait);
1236 if (signal_pending(current))
1237 return -EINTR;
1239 switch (hdev->req_status) {
1240 case HCI_REQ_DONE:
1241 err = -bt_to_errno(hdev->req_result);
1242 break;
1244 case HCI_REQ_CANCELED:
1245 err = -hdev->req_result;
1246 break;
1248 default:
1249 err = -ETIMEDOUT;
1250 break;
1253 hdev->req_status = hdev->req_result = 0;
1255 BT_DBG("%s end: err %d", hdev->name, err);
1257 return err;
1260 static int hci_req_sync(struct hci_dev *hdev,
1261 void (*req)(struct hci_request *req,
1262 unsigned long opt),
1263 unsigned long opt, __u32 timeout)
1265 int ret;
1267 if (!test_bit(HCI_UP, &hdev->flags))
1268 return -ENETDOWN;
1270 /* Serialize all requests */
1271 hci_req_lock(hdev);
1272 ret = __hci_req_sync(hdev, req, opt, timeout);
1273 hci_req_unlock(hdev);
1275 return ret;
1278 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1280 BT_DBG("%s %ld", req->hdev->name, opt);
1282 /* Reset device */
1283 set_bit(HCI_RESET, &req->hdev->flags);
1284 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1287 static void bredr_init(struct hci_request *req)
1289 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1291 /* Read Local Supported Features */
1292 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1294 /* Read Local Version */
1295 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1297 /* Read BD Address */
1298 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1301 static void amp_init(struct hci_request *req)
1303 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1305 /* Read Local Version */
1306 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1308 /* Read Local Supported Commands */
1309 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1311 /* Read Local Supported Features */
1312 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read Local AMP Info */
1315 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1317 /* Read Data Blk size */
1318 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1320 /* Read Flow Control Mode */
1321 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1323 /* Read Location Data */
1324 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1327 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1329 struct hci_dev *hdev = req->hdev;
1331 BT_DBG("%s %ld", hdev->name, opt);
1333 /* Reset */
1334 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1335 hci_reset_req(req, 0);
1337 switch (hdev->dev_type) {
1338 case HCI_BREDR:
1339 bredr_init(req);
1340 break;
1342 case HCI_AMP:
1343 amp_init(req);
1344 break;
1346 default:
1347 BT_ERR("Unknown device type %d", hdev->dev_type);
1348 break;
1352 static void bredr_setup(struct hci_request *req)
1354 struct hci_dev *hdev = req->hdev;
1356 __le16 param;
1357 __u8 flt_type;
1359 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1360 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1362 /* Read Class of Device */
1363 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1365 /* Read Local Name */
1366 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1368 /* Read Voice Setting */
1369 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1371 /* Read Number of Supported IAC */
1372 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1374 /* Read Current IAC LAP */
1375 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1377 /* Clear Event Filters */
1378 flt_type = HCI_FLT_CLEAR_ALL;
1379 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1381 /* Connection accept timeout ~20 secs */
1382 param = cpu_to_le16(0x7d00);
1383 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1385 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1386 * but it does not support page scan related HCI commands.
1388 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1389 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1390 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1394 static void le_setup(struct hci_request *req)
1396 struct hci_dev *hdev = req->hdev;
1398 /* Read LE Buffer Size */
1399 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1401 /* Read LE Local Supported Features */
1402 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1404 /* Read LE Supported States */
1405 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1407 /* Read LE White List Size */
1408 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1410 /* Clear LE White List */
1411 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1413 /* LE-only controllers have LE implicitly enabled */
1414 if (!lmp_bredr_capable(hdev))
1415 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1420 if (lmp_ext_inq_capable(hdev))
1421 return 0x02;
1423 if (lmp_inq_rssi_capable(hdev))
1424 return 0x01;
1426 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427 hdev->lmp_subver == 0x0757)
1428 return 0x01;
1430 if (hdev->manufacturer == 15) {
1431 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432 return 0x01;
1433 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434 return 0x01;
1435 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436 return 0x01;
1439 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440 hdev->lmp_subver == 0x1805)
1441 return 0x01;
1443 return 0x00;
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1448 u8 mode;
1450 mode = hci_get_inquiry_mode(req->hdev);
1452 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1455 static void hci_setup_event_mask(struct hci_request *req)
1457 struct hci_dev *hdev = req->hdev;
1459 /* The second byte is 0xff instead of 0x9f (two reserved bits
1460 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461 * command otherwise.
1463 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1465 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1466 * any event mask for pre 1.2 devices.
1468 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469 return;
1471 if (lmp_bredr_capable(hdev)) {
1472 events[4] |= 0x01; /* Flow Specification Complete */
1473 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475 events[5] |= 0x08; /* Synchronous Connection Complete */
1476 events[5] |= 0x10; /* Synchronous Connection Changed */
1477 } else {
1478 /* Use a different default for LE-only devices */
1479 memset(events, 0, sizeof(events));
1480 events[0] |= 0x10; /* Disconnection Complete */
1481 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482 events[1] |= 0x20; /* Command Complete */
1483 events[1] |= 0x40; /* Command Status */
1484 events[1] |= 0x80; /* Hardware Error */
1485 events[2] |= 0x04; /* Number of Completed Packets */
1486 events[3] |= 0x02; /* Data Buffer Overflow */
1488 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489 events[0] |= 0x80; /* Encryption Change */
1490 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1494 if (lmp_inq_rssi_capable(hdev))
1495 events[4] |= 0x02; /* Inquiry Result with RSSI */
1497 if (lmp_sniffsubr_capable(hdev))
1498 events[5] |= 0x20; /* Sniff Subrating */
1500 if (lmp_pause_enc_capable(hdev))
1501 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1503 if (lmp_ext_inq_capable(hdev))
1504 events[5] |= 0x40; /* Extended Inquiry Result */
1506 if (lmp_no_flush_capable(hdev))
1507 events[7] |= 0x01; /* Enhanced Flush Complete */
1509 if (lmp_lsto_capable(hdev))
1510 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1512 if (lmp_ssp_capable(hdev)) {
1513 events[6] |= 0x01; /* IO Capability Request */
1514 events[6] |= 0x02; /* IO Capability Response */
1515 events[6] |= 0x04; /* User Confirmation Request */
1516 events[6] |= 0x08; /* User Passkey Request */
1517 events[6] |= 0x10; /* Remote OOB Data Request */
1518 events[6] |= 0x20; /* Simple Pairing Complete */
1519 events[7] |= 0x04; /* User Passkey Notification */
1520 events[7] |= 0x08; /* Keypress Notification */
1521 events[7] |= 0x10; /* Remote Host Supported
1522 * Features Notification
1526 if (lmp_le_capable(hdev))
1527 events[7] |= 0x20; /* LE Meta-Event */
1529 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1534 struct hci_dev *hdev = req->hdev;
1536 if (lmp_bredr_capable(hdev))
1537 bredr_setup(req);
1538 else
1539 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1541 if (lmp_le_capable(hdev))
1542 le_setup(req);
1544 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545 * local supported commands HCI command.
1547 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1550 if (lmp_ssp_capable(hdev)) {
1551 /* When SSP is available, then the host features page
1552 * should also be available as well. However some
1553 * controllers list the max_page as 0 as long as SSP
1554 * has not been enabled. To achieve proper debugging
1555 * output, force the minimum max_page to 1 at least.
1557 hdev->max_page = 0x01;
1559 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560 u8 mode = 0x01;
1561 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562 sizeof(mode), &mode);
1563 } else {
1564 struct hci_cp_write_eir cp;
1566 memset(hdev->eir, 0, sizeof(hdev->eir));
1567 memset(&cp, 0, sizeof(cp));
1569 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1573 if (lmp_inq_rssi_capable(hdev))
1574 hci_setup_inquiry_mode(req);
1576 if (lmp_inq_tx_pwr_capable(hdev))
1577 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1579 if (lmp_ext_feat_capable(hdev)) {
1580 struct hci_cp_read_local_ext_features cp;
1582 cp.page = 0x01;
1583 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584 sizeof(cp), &cp);
1587 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588 u8 enable = 1;
1589 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590 &enable);
1594 static void hci_setup_link_policy(struct hci_request *req)
1596 struct hci_dev *hdev = req->hdev;
1597 struct hci_cp_write_def_link_policy cp;
1598 u16 link_policy = 0;
1600 if (lmp_rswitch_capable(hdev))
1601 link_policy |= HCI_LP_RSWITCH;
1602 if (lmp_hold_capable(hdev))
1603 link_policy |= HCI_LP_HOLD;
1604 if (lmp_sniff_capable(hdev))
1605 link_policy |= HCI_LP_SNIFF;
1606 if (lmp_park_capable(hdev))
1607 link_policy |= HCI_LP_PARK;
1609 cp.policy = cpu_to_le16(link_policy);
1610 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1613 static void hci_set_le_support(struct hci_request *req)
1615 struct hci_dev *hdev = req->hdev;
1616 struct hci_cp_write_le_host_supported cp;
1618 /* LE-only devices do not support explicit enablement */
1619 if (!lmp_bredr_capable(hdev))
1620 return;
1622 memset(&cp, 0, sizeof(cp));
1624 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625 cp.le = 0x01;
1626 cp.simul = 0x00;
1629 if (cp.le != lmp_host_le_capable(hdev))
1630 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631 &cp);
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1636 struct hci_dev *hdev = req->hdev;
1637 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1639 /* If Connectionless Slave Broadcast master role is supported
1640 * enable all necessary events for it.
1642 if (lmp_csb_master_capable(hdev)) {
1643 events[1] |= 0x40; /* Triggered Clock Capture */
1644 events[1] |= 0x80; /* Synchronization Train Complete */
1645 events[2] |= 0x10; /* Slave Page Response Timeout */
1646 events[2] |= 0x20; /* CSB Channel Map Change */
1649 /* If Connectionless Slave Broadcast slave role is supported
1650 * enable all necessary events for it.
1652 if (lmp_csb_slave_capable(hdev)) {
1653 events[2] |= 0x01; /* Synchronization Train Received */
1654 events[2] |= 0x02; /* CSB Receive */
1655 events[2] |= 0x04; /* CSB Timeout */
1656 events[2] |= 0x08; /* Truncated Page Complete */
1659 /* Enable Authenticated Payload Timeout Expired event if supported */
1660 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1661 events[2] |= 0x80;
1663 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1666 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1668 struct hci_dev *hdev = req->hdev;
1669 u8 p;
1671 hci_setup_event_mask(req);
1673 /* Some Broadcom based Bluetooth controllers do not support the
1674 * Delete Stored Link Key command. They are clearly indicating its
1675 * absence in the bit mask of supported commands.
1677 * Check the supported commands and only if the the command is marked
1678 * as supported send it. If not supported assume that the controller
1679 * does not have actual support for stored link keys which makes this
1680 * command redundant anyway.
1682 * Some controllers indicate that they support handling deleting
1683 * stored link keys, but they don't. The quirk lets a driver
1684 * just disable this command.
1686 if (hdev->commands[6] & 0x80 &&
1687 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1688 struct hci_cp_delete_stored_link_key cp;
1690 bacpy(&cp.bdaddr, BDADDR_ANY);
1691 cp.delete_all = 0x01;
1692 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693 sizeof(cp), &cp);
1696 if (hdev->commands[5] & 0x10)
1697 hci_setup_link_policy(req);
1699 if (lmp_le_capable(hdev)) {
1700 u8 events[8];
1702 memset(events, 0, sizeof(events));
1703 events[0] = 0x0f;
1705 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706 events[0] |= 0x10; /* LE Long Term Key Request */
1708 /* If controller supports the Connection Parameters Request
1709 * Link Layer Procedure, enable the corresponding event.
1711 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712 events[0] |= 0x20; /* LE Remote Connection
1713 * Parameter Request
1716 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717 events);
1719 if (hdev->commands[25] & 0x40) {
1720 /* Read LE Advertising Channel TX Power */
1721 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1724 hci_set_le_support(req);
1727 /* Read features beyond page 1 if available */
1728 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729 struct hci_cp_read_local_ext_features cp;
1731 cp.page = p;
1732 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733 sizeof(cp), &cp);
1737 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1739 struct hci_dev *hdev = req->hdev;
1741 /* Set event mask page 2 if the HCI command for it is supported */
1742 if (hdev->commands[22] & 0x04)
1743 hci_set_event_mask_page_2(req);
1745 /* Read local codec list if the HCI command is supported */
1746 if (hdev->commands[29] & 0x20)
1747 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1749 /* Get MWS transport configuration if the HCI command is supported */
1750 if (hdev->commands[30] & 0x08)
1751 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1753 /* Check for Synchronization Train support */
1754 if (lmp_sync_train_capable(hdev))
1755 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1757 /* Enable Secure Connections if supported and configured */
1758 if ((lmp_sc_capable(hdev) ||
1759 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1760 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761 u8 support = 0x01;
1762 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763 sizeof(support), &support);
1767 static int __hci_init(struct hci_dev *hdev)
1769 int err;
1771 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1775 /* The Device Under Test (DUT) mode is special and available for
1776 * all controller types. So just create it early on.
1778 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780 &dut_mode_fops);
1783 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1784 * BR/EDR/LE type controllers. AMP controllers only need the
1785 * first stage init.
1787 if (hdev->dev_type != HCI_BREDR)
1788 return 0;
1790 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1794 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1798 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799 if (err < 0)
1800 return err;
1802 /* Only create debugfs entries during the initial setup
1803 * phase and not every time the controller gets powered on.
1805 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806 return 0;
1808 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809 &features_fops);
1810 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811 &hdev->manufacturer);
1812 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815 &blacklist_fops);
1816 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817 &whitelist_fops);
1818 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1820 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821 &conn_info_min_age_fops);
1822 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823 &conn_info_max_age_fops);
1825 if (lmp_bredr_capable(hdev)) {
1826 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827 hdev, &inquiry_cache_fops);
1828 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829 hdev, &link_keys_fops);
1830 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831 hdev, &dev_class_fops);
1832 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833 hdev, &voice_setting_fops);
1836 if (lmp_ssp_capable(hdev)) {
1837 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838 hdev, &auto_accept_delay_fops);
1839 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840 hdev, &force_sc_support_fops);
1841 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842 hdev, &sc_only_mode_fops);
1845 if (lmp_sniff_capable(hdev)) {
1846 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847 hdev, &idle_timeout_fops);
1848 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849 hdev, &sniff_min_interval_fops);
1850 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851 hdev, &sniff_max_interval_fops);
1854 if (lmp_le_capable(hdev)) {
1855 debugfs_create_file("identity", 0400, hdev->debugfs,
1856 hdev, &identity_fops);
1857 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858 hdev, &rpa_timeout_fops);
1859 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860 hdev, &random_address_fops);
1861 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862 hdev, &static_address_fops);
1864 /* For controllers with a public address, provide a debug
1865 * option to force the usage of the configured static
1866 * address. By default the public address is used.
1868 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869 debugfs_create_file("force_static_address", 0644,
1870 hdev->debugfs, hdev,
1871 &force_static_address_fops);
1873 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874 &hdev->le_white_list_size);
1875 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876 &white_list_fops);
1877 debugfs_create_file("identity_resolving_keys", 0400,
1878 hdev->debugfs, hdev,
1879 &identity_resolving_keys_fops);
1880 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881 hdev, &long_term_keys_fops);
1882 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883 hdev, &conn_min_interval_fops);
1884 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885 hdev, &conn_max_interval_fops);
1886 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887 hdev, &conn_latency_fops);
1888 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889 hdev, &supervision_timeout_fops);
1890 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891 hdev, &adv_channel_map_fops);
1892 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893 hdev, &adv_min_interval_fops);
1894 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895 hdev, &adv_max_interval_fops);
1896 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897 &device_list_fops);
1898 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899 hdev->debugfs,
1900 &hdev->discov_interleaved_timeout);
1903 return 0;
1906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1908 struct hci_dev *hdev = req->hdev;
1910 BT_DBG("%s %ld", hdev->name, opt);
1912 /* Reset */
1913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1914 hci_reset_req(req, 0);
1916 /* Read Local Version */
1917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1919 /* Read BD Address */
1920 if (hdev->set_bdaddr)
1921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1924 static int __hci_unconf_init(struct hci_dev *hdev)
1926 int err;
1928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1929 return 0;
1931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1932 if (err < 0)
1933 return err;
1935 return 0;
1938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1940 __u8 scan = opt;
1942 BT_DBG("%s %x", req->hdev->name, scan);
1944 /* Inquiry and Page scans */
1945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1950 __u8 auth = opt;
1952 BT_DBG("%s %x", req->hdev->name, auth);
1954 /* Authentication */
1955 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1960 __u8 encrypt = opt;
1962 BT_DBG("%s %x", req->hdev->name, encrypt);
1964 /* Encryption */
1965 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1970 __le16 policy = cpu_to_le16(opt);
1972 BT_DBG("%s %x", req->hdev->name, policy);
1974 /* Default link policy */
1975 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1978 /* Get HCI device by index.
1979 * Device is held on return. */
1980 struct hci_dev *hci_dev_get(int index)
1982 struct hci_dev *hdev = NULL, *d;
1984 BT_DBG("%d", index);
1986 if (index < 0)
1987 return NULL;
1989 read_lock(&hci_dev_list_lock);
1990 list_for_each_entry(d, &hci_dev_list, list) {
1991 if (d->id == index) {
1992 hdev = hci_dev_hold(d);
1993 break;
1996 read_unlock(&hci_dev_list_lock);
1997 return hdev;
2000 /* ---- Inquiry support ---- */
2002 bool hci_discovery_active(struct hci_dev *hdev)
2004 struct discovery_state *discov = &hdev->discovery;
2006 switch (discov->state) {
2007 case DISCOVERY_FINDING:
2008 case DISCOVERY_RESOLVING:
2009 return true;
2011 default:
2012 return false;
2016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2018 int old_state = hdev->discovery.state;
2020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2022 if (old_state == state)
2023 return;
2025 hdev->discovery.state = state;
2027 switch (state) {
2028 case DISCOVERY_STOPPED:
2029 hci_update_background_scan(hdev);
2031 if (old_state != DISCOVERY_STARTING)
2032 mgmt_discovering(hdev, 0);
2033 break;
2034 case DISCOVERY_STARTING:
2035 break;
2036 case DISCOVERY_FINDING:
2037 mgmt_discovering(hdev, 1);
2038 break;
2039 case DISCOVERY_RESOLVING:
2040 break;
2041 case DISCOVERY_STOPPING:
2042 break;
2046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_entry *p, *n;
2051 list_for_each_entry_safe(p, n, &cache->all, all) {
2052 list_del(&p->all);
2053 kfree(p);
2056 INIT_LIST_HEAD(&cache->unknown);
2057 INIT_LIST_HEAD(&cache->resolve);
2060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2061 bdaddr_t *bdaddr)
2063 struct discovery_state *cache = &hdev->discovery;
2064 struct inquiry_entry *e;
2066 BT_DBG("cache %p, %pMR", cache, bdaddr);
2068 list_for_each_entry(e, &cache->all, all) {
2069 if (!bacmp(&e->data.bdaddr, bdaddr))
2070 return e;
2073 return NULL;
2076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2077 bdaddr_t *bdaddr)
2079 struct discovery_state *cache = &hdev->discovery;
2080 struct inquiry_entry *e;
2082 BT_DBG("cache %p, %pMR", cache, bdaddr);
2084 list_for_each_entry(e, &cache->unknown, list) {
2085 if (!bacmp(&e->data.bdaddr, bdaddr))
2086 return e;
2089 return NULL;
2092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2093 bdaddr_t *bdaddr,
2094 int state)
2096 struct discovery_state *cache = &hdev->discovery;
2097 struct inquiry_entry *e;
2099 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2101 list_for_each_entry(e, &cache->resolve, list) {
2102 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2103 return e;
2104 if (!bacmp(&e->data.bdaddr, bdaddr))
2105 return e;
2108 return NULL;
2111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2112 struct inquiry_entry *ie)
2114 struct discovery_state *cache = &hdev->discovery;
2115 struct list_head *pos = &cache->resolve;
2116 struct inquiry_entry *p;
2118 list_del(&ie->list);
2120 list_for_each_entry(p, &cache->resolve, list) {
2121 if (p->name_state != NAME_PENDING &&
2122 abs(p->data.rssi) >= abs(ie->data.rssi))
2123 break;
2124 pos = &p->list;
2127 list_add(&ie->list, pos);
2130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2131 bool name_known)
2133 struct discovery_state *cache = &hdev->discovery;
2134 struct inquiry_entry *ie;
2135 u32 flags = 0;
2137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2139 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2141 if (!data->ssp_mode)
2142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2145 if (ie) {
2146 if (!ie->data.ssp_mode)
2147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2149 if (ie->name_state == NAME_NEEDED &&
2150 data->rssi != ie->data.rssi) {
2151 ie->data.rssi = data->rssi;
2152 hci_inquiry_cache_update_resolve(hdev, ie);
2155 goto update;
2158 /* Entry not in the cache. Add new one. */
2159 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2160 if (!ie) {
2161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2162 goto done;
2165 list_add(&ie->all, &cache->all);
2167 if (name_known) {
2168 ie->name_state = NAME_KNOWN;
2169 } else {
2170 ie->name_state = NAME_NOT_KNOWN;
2171 list_add(&ie->list, &cache->unknown);
2174 update:
2175 if (name_known && ie->name_state != NAME_KNOWN &&
2176 ie->name_state != NAME_PENDING) {
2177 ie->name_state = NAME_KNOWN;
2178 list_del(&ie->list);
2181 memcpy(&ie->data, data, sizeof(*data));
2182 ie->timestamp = jiffies;
2183 cache->timestamp = jiffies;
2185 if (ie->name_state == NAME_NOT_KNOWN)
2186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2188 done:
2189 return flags;
2192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2194 struct discovery_state *cache = &hdev->discovery;
2195 struct inquiry_info *info = (struct inquiry_info *) buf;
2196 struct inquiry_entry *e;
2197 int copied = 0;
2199 list_for_each_entry(e, &cache->all, all) {
2200 struct inquiry_data *data = &e->data;
2202 if (copied >= num)
2203 break;
2205 bacpy(&info->bdaddr, &data->bdaddr);
2206 info->pscan_rep_mode = data->pscan_rep_mode;
2207 info->pscan_period_mode = data->pscan_period_mode;
2208 info->pscan_mode = data->pscan_mode;
2209 memcpy(info->dev_class, data->dev_class, 3);
2210 info->clock_offset = data->clock_offset;
2212 info++;
2213 copied++;
2216 BT_DBG("cache %p, copied %d", cache, copied);
2217 return copied;
2220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2222 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2223 struct hci_dev *hdev = req->hdev;
2224 struct hci_cp_inquiry cp;
2226 BT_DBG("%s", hdev->name);
2228 if (test_bit(HCI_INQUIRY, &hdev->flags))
2229 return;
2231 /* Start Inquiry */
2232 memcpy(&cp.lap, &ir->lap, 3);
2233 cp.length = ir->length;
2234 cp.num_rsp = ir->num_rsp;
2235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
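/* HCIINQUIRY ioctl handler: flush the cache if needed, run a synchronous
 * inquiry and copy the cached results back to user space */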
2238 int hci_inquiry(void __user *arg)
2240 __u8 __user *ptr = arg;
2241 struct hci_inquiry_req ir;
2242 struct hci_dev *hdev;
2243 int err = 0, do_inquiry = 0, max_rsp;
2244 long timeo;
2245 __u8 *buf;
2247 if (copy_from_user(&ir, ptr, sizeof(ir)))
2248 return -EFAULT;
2250 hdev = hci_dev_get(ir.dev_id);
2251 if (!hdev)
2252 return -ENODEV;
2254 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2255 err = -EBUSY;
2256 goto done;
2259 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2260 err = -EOPNOTSUPP;
2261 goto done;
2264 if (hdev->dev_type != HCI_BREDR) {
2265 err = -EOPNOTSUPP;
2266 goto done;
2269 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2270 err = -EOPNOTSUPP;
2271 goto done;
2274 hci_dev_lock(hdev);
2275 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2276 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2277 hci_inquiry_cache_flush(hdev);
2278 do_inquiry = 1;
2280 hci_dev_unlock(hdev);
2282 timeo = ir.length * msecs_to_jiffies(2000);
2284 if (do_inquiry) {
2285 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2286 timeo);
2287 if (err < 0)
2288 goto done;
2290 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2291 * cleared). If it is interrupted by a signal, return -EINTR.
2293 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2294 TASK_INTERRUPTIBLE))
2295 return -EINTR;
2298 	/* For an unlimited number of responses we use a buffer with
2299 	 * 255 entries
2301 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2303 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2304 	 * copy it to user space.
2306 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2307 if (!buf) {
2308 err = -ENOMEM;
2309 goto done;
2312 hci_dev_lock(hdev);
2313 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2314 hci_dev_unlock(hdev);
2316 BT_DBG("num_rsp %d", ir.num_rsp);
2318 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2319 ptr += sizeof(ir);
2320 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2321 ir.num_rsp))
2322 err = -EFAULT;
2323 } else
2324 err = -EFAULT;
2326 kfree(buf);
2328 done:
2329 hci_dev_put(hdev);
2330 return err;
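/* Bring the controller up: run the setup/init sequence and, on failure,
 * flush all pending work and close the transport again */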
2333 static int hci_dev_do_open(struct hci_dev *hdev)
2335 int ret = 0;
2337 BT_DBG("%s %p", hdev->name, hdev);
2339 hci_req_lock(hdev);
2341 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2342 ret = -ENODEV;
2343 goto done;
2346 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2347 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2348 /* Check for rfkill but allow the HCI setup stage to
2349 * proceed (which in itself doesn't cause any RF activity).
2351 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2352 ret = -ERFKILL;
2353 goto done;
2356 /* Check for valid public address or a configured static
2357 	 * random address, but let the HCI setup proceed to
2358 * be able to determine if there is a public address
2359 * or not.
2361 * In case of user channel usage, it is not important
2362 * if a public address or static random address is
2363 * available.
2365 * This check is only valid for BR/EDR controllers
2366 * since AMP controllers do not have an address.
2368 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2369 hdev->dev_type == HCI_BREDR &&
2370 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2371 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2372 ret = -EADDRNOTAVAIL;
2373 goto done;
2377 if (test_bit(HCI_UP, &hdev->flags)) {
2378 ret = -EALREADY;
2379 goto done;
2382 if (hdev->open(hdev)) {
2383 ret = -EIO;
2384 goto done;
2387 atomic_set(&hdev->cmd_cnt, 1);
2388 set_bit(HCI_INIT, &hdev->flags);
2390 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2391 if (hdev->setup)
2392 ret = hdev->setup(hdev);
2394 /* The transport driver can set these quirks before
2395 * creating the HCI device or in its setup callback.
2397 * In case any of them is set, the controller has to
2398 * start up as unconfigured.
2400 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2401 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2402 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2404 /* For an unconfigured controller it is required to
2405 * read at least the version information provided by
2406 * the Read Local Version Information command.
2408 * If the set_bdaddr driver callback is provided, then
2409 * also the original Bluetooth public device address
2410 * will be read using the Read BD Address command.
2412 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2413 ret = __hci_unconf_init(hdev);
2416 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2417 /* If public address change is configured, ensure that
2418 * the address gets programmed. If the driver does not
2419 * support changing the public address, fail the power
2420 * on procedure.
2422 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2423 hdev->set_bdaddr)
2424 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2425 else
2426 ret = -EADDRNOTAVAIL;
2429 if (!ret) {
2430 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2431 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2432 ret = __hci_init(hdev);
2435 clear_bit(HCI_INIT, &hdev->flags);
2437 if (!ret) {
2438 hci_dev_hold(hdev);
2439 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2440 set_bit(HCI_UP, &hdev->flags);
2441 hci_notify(hdev, HCI_DEV_UP);
2442 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2443 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2444 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2445 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2446 hdev->dev_type == HCI_BREDR) {
2447 hci_dev_lock(hdev);
2448 mgmt_powered(hdev, 1);
2449 hci_dev_unlock(hdev);
2451 } else {
2452 /* Init failed, cleanup */
2453 flush_work(&hdev->tx_work);
2454 flush_work(&hdev->cmd_work);
2455 flush_work(&hdev->rx_work);
2457 skb_queue_purge(&hdev->cmd_q);
2458 skb_queue_purge(&hdev->rx_q);
2460 if (hdev->flush)
2461 hdev->flush(hdev);
2463 if (hdev->sent_cmd) {
2464 kfree_skb(hdev->sent_cmd);
2465 hdev->sent_cmd = NULL;
2468 hdev->close(hdev);
2469 hdev->flags &= BIT(HCI_RAW);
2472 done:
2473 hci_req_unlock(hdev);
2474 return ret;
2477 /* ---- HCI ioctl helpers ---- */
2479 int hci_dev_open(__u16 dev)
2481 struct hci_dev *hdev;
2482 int err;
2484 hdev = hci_dev_get(dev);
2485 if (!hdev)
2486 return -ENODEV;
2488 /* Devices that are marked as unconfigured can only be powered
2489 * up as user channel. Trying to bring them up as normal devices
2490 	 * will result in a failure. Only user channel operation is
2491 * possible.
2493 * When this function is called for a user channel, the flag
2494 * HCI_USER_CHANNEL will be set first before attempting to
2495 * open the device.
2497 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2498 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2499 err = -EOPNOTSUPP;
2500 goto done;
2503 /* We need to ensure that no other power on/off work is pending
2504 * before proceeding to call hci_dev_do_open. This is
2505 * particularly important if the setup procedure has not yet
2506 * completed.
2508 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2509 cancel_delayed_work(&hdev->power_off);
2511 /* After this call it is guaranteed that the setup procedure
2512 * has finished. This means that error conditions like RFKILL
2513 * or no valid public or static random address apply.
2515 flush_workqueue(hdev->req_workqueue);
2517 /* For controllers not using the management interface and that
2518 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2519 * so that pairing works for them. Once the management interface
2520 * is in use this bit will be cleared again and userspace has
2521 * to explicitly enable it.
2523 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2524 !test_bit(HCI_MGMT, &hdev->dev_flags))
2525 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2527 err = hci_dev_do_open(hdev);
2529 done:
2530 hci_dev_put(hdev);
2531 return err;
2534 /* This function requires the caller holds hdev->lock */
2535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2537 struct hci_conn_params *p;
2539 list_for_each_entry(p, &hdev->le_conn_params, list) {
2540 if (p->conn) {
2541 hci_conn_drop(p->conn);
2542 p->conn = NULL;
2544 list_del_init(&p->action);
2547 BT_DBG("All LE pending actions cleared");
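/* Power the controller down: cancel pending work, flush queues and
 * caches, optionally issue HCI Reset and close the transport */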
2550 static int hci_dev_do_close(struct hci_dev *hdev)
2552 BT_DBG("%s %p", hdev->name, hdev);
2554 cancel_delayed_work(&hdev->power_off);
2556 hci_req_cancel(hdev, ENODEV);
2557 hci_req_lock(hdev);
2559 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2560 cancel_delayed_work_sync(&hdev->cmd_timer);
2561 hci_req_unlock(hdev);
2562 return 0;
2565 /* Flush RX and TX works */
2566 flush_work(&hdev->tx_work);
2567 flush_work(&hdev->rx_work);
2569 if (hdev->discov_timeout > 0) {
2570 cancel_delayed_work(&hdev->discov_off);
2571 hdev->discov_timeout = 0;
2572 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2573 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2576 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2577 cancel_delayed_work(&hdev->service_cache);
2579 cancel_delayed_work_sync(&hdev->le_scan_disable);
2581 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2582 cancel_delayed_work_sync(&hdev->rpa_expired);
2584 hci_dev_lock(hdev);
2585 hci_inquiry_cache_flush(hdev);
2586 hci_pend_le_actions_clear(hdev);
2587 hci_conn_hash_flush(hdev);
2588 hci_dev_unlock(hdev);
2590 hci_notify(hdev, HCI_DEV_DOWN);
2592 if (hdev->flush)
2593 hdev->flush(hdev);
2595 /* Reset device */
2596 skb_queue_purge(&hdev->cmd_q);
2597 atomic_set(&hdev->cmd_cnt, 1);
2598 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2599 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2600 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2601 set_bit(HCI_INIT, &hdev->flags);
2602 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2603 clear_bit(HCI_INIT, &hdev->flags);
2606 /* flush cmd work */
2607 flush_work(&hdev->cmd_work);
2609 /* Drop queues */
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2612 skb_queue_purge(&hdev->raw_q);
2614 /* Drop last sent command */
2615 if (hdev->sent_cmd) {
2616 cancel_delayed_work_sync(&hdev->cmd_timer);
2617 kfree_skb(hdev->sent_cmd);
2618 hdev->sent_cmd = NULL;
2621 kfree_skb(hdev->recv_evt);
2622 hdev->recv_evt = NULL;
2624 /* After this point our queues are empty
2625 * and no tasks are scheduled. */
2626 hdev->close(hdev);
2628 /* Clear flags */
2629 hdev->flags &= BIT(HCI_RAW);
2630 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2632 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2633 if (hdev->dev_type == HCI_BREDR) {
2634 hci_dev_lock(hdev);
2635 mgmt_powered(hdev, 0);
2636 hci_dev_unlock(hdev);
2640 /* Controller radio is available but is currently powered down */
2641 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2643 memset(hdev->eir, 0, sizeof(hdev->eir));
2644 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2645 bacpy(&hdev->random_addr, BDADDR_ANY);
2647 hci_req_unlock(hdev);
2649 hci_dev_put(hdev);
2650 return 0;
2653 int hci_dev_close(__u16 dev)
2655 struct hci_dev *hdev;
2656 int err;
2658 hdev = hci_dev_get(dev);
2659 if (!hdev)
2660 return -ENODEV;
2662 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2663 err = -EBUSY;
2664 goto done;
2667 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2668 cancel_delayed_work(&hdev->power_off);
2670 err = hci_dev_do_close(hdev);
2672 done:
2673 hci_dev_put(hdev);
2674 return err;
2677 int hci_dev_reset(__u16 dev)
2679 struct hci_dev *hdev;
2680 int ret = 0;
2682 hdev = hci_dev_get(dev);
2683 if (!hdev)
2684 return -ENODEV;
2686 hci_req_lock(hdev);
2688 if (!test_bit(HCI_UP, &hdev->flags)) {
2689 ret = -ENETDOWN;
2690 goto done;
2693 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2694 ret = -EBUSY;
2695 goto done;
2698 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2699 ret = -EOPNOTSUPP;
2700 goto done;
2703 /* Drop queues */
2704 skb_queue_purge(&hdev->rx_q);
2705 skb_queue_purge(&hdev->cmd_q);
2707 hci_dev_lock(hdev);
2708 hci_inquiry_cache_flush(hdev);
2709 hci_conn_hash_flush(hdev);
2710 hci_dev_unlock(hdev);
2712 if (hdev->flush)
2713 hdev->flush(hdev);
2715 atomic_set(&hdev->cmd_cnt, 1);
2716 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2718 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2720 done:
2721 hci_req_unlock(hdev);
2722 hci_dev_put(hdev);
2723 return ret;
2726 int hci_dev_reset_stat(__u16 dev)
2728 struct hci_dev *hdev;
2729 int ret = 0;
2731 hdev = hci_dev_get(dev);
2732 if (!hdev)
2733 return -ENODEV;
2735 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2736 ret = -EBUSY;
2737 goto done;
2740 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2741 ret = -EOPNOTSUPP;
2742 goto done;
2745 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2747 done:
2748 hci_dev_put(hdev);
2749 return ret;
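/* Keep the CONNECTABLE/DISCOVERABLE flags in sync with a scan mode
 * change made through the legacy ioctl interface */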
2752 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2754 bool conn_changed, discov_changed;
2756 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2758 if ((scan & SCAN_PAGE))
2759 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2760 &hdev->dev_flags);
2761 else
2762 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2763 &hdev->dev_flags);
2765 if ((scan & SCAN_INQUIRY)) {
2766 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2767 &hdev->dev_flags);
2768 } else {
2769 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2770 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2771 &hdev->dev_flags);
2774 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2775 return;
2777 if (conn_changed || discov_changed) {
2778 /* In case this was disabled through mgmt */
2779 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2781 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2782 mgmt_update_adv_data(hdev);
2784 mgmt_new_settings(hdev);
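/* Handle the remaining HCI device ioctls (HCISETAUTH, HCISETSCAN, ...) */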
2788 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2790 struct hci_dev *hdev;
2791 struct hci_dev_req dr;
2792 int err = 0;
2794 if (copy_from_user(&dr, arg, sizeof(dr)))
2795 return -EFAULT;
2797 hdev = hci_dev_get(dr.dev_id);
2798 if (!hdev)
2799 return -ENODEV;
2801 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2802 err = -EBUSY;
2803 goto done;
2806 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2807 err = -EOPNOTSUPP;
2808 goto done;
2811 if (hdev->dev_type != HCI_BREDR) {
2812 err = -EOPNOTSUPP;
2813 goto done;
2816 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2817 err = -EOPNOTSUPP;
2818 goto done;
2821 switch (cmd) {
2822 case HCISETAUTH:
2823 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2824 HCI_INIT_TIMEOUT);
2825 break;
2827 case HCISETENCRYPT:
2828 if (!lmp_encrypt_capable(hdev)) {
2829 err = -EOPNOTSUPP;
2830 break;
2833 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2834 /* Auth must be enabled first */
2835 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2836 HCI_INIT_TIMEOUT);
2837 if (err)
2838 break;
2841 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2842 HCI_INIT_TIMEOUT);
2843 break;
2845 case HCISETSCAN:
2846 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2847 HCI_INIT_TIMEOUT);
2849 /* Ensure that the connectable and discoverable states
2850 * get correctly modified as this was a non-mgmt change.
2852 if (!err)
2853 hci_update_scan_state(hdev, dr.dev_opt);
2854 break;
2856 case HCISETLINKPOL:
2857 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2858 HCI_INIT_TIMEOUT);
2859 break;
2861 case HCISETLINKMODE:
2862 hdev->link_mode = ((__u16) dr.dev_opt) &
2863 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2864 break;
2866 case HCISETPTYPE:
2867 hdev->pkt_type = (__u16) dr.dev_opt;
2868 break;
2870 case HCISETACLMTU:
2871 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2872 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2873 break;
2875 case HCISETSCOMTU:
2876 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2877 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2878 break;
2880 default:
2881 err = -EINVAL;
2882 break;
2885 done:
2886 hci_dev_put(hdev);
2887 return err;
2890 int hci_get_dev_list(void __user *arg)
2892 struct hci_dev *hdev;
2893 struct hci_dev_list_req *dl;
2894 struct hci_dev_req *dr;
2895 int n = 0, size, err;
2896 __u16 dev_num;
2898 if (get_user(dev_num, (__u16 __user *) arg))
2899 return -EFAULT;
2901 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2902 return -EINVAL;
2904 size = sizeof(*dl) + dev_num * sizeof(*dr);
2906 dl = kzalloc(size, GFP_KERNEL);
2907 if (!dl)
2908 return -ENOMEM;
2910 dr = dl->dev_req;
2912 read_lock(&hci_dev_list_lock);
2913 list_for_each_entry(hdev, &hci_dev_list, list) {
2914 unsigned long flags = hdev->flags;
2916 /* When the auto-off is configured it means the transport
2917 * is running, but in that case still indicate that the
2918 * device is actually down.
2920 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2921 flags &= ~BIT(HCI_UP);
2923 (dr + n)->dev_id = hdev->id;
2924 (dr + n)->dev_opt = flags;
2926 if (++n >= dev_num)
2927 break;
2929 read_unlock(&hci_dev_list_lock);
2931 dl->dev_num = n;
2932 size = sizeof(*dl) + n * sizeof(*dr);
2934 err = copy_to_user(arg, dl, size);
2935 kfree(dl);
2937 return err ? -EFAULT : 0;
2940 int hci_get_dev_info(void __user *arg)
2942 struct hci_dev *hdev;
2943 struct hci_dev_info di;
2944 unsigned long flags;
2945 int err = 0;
2947 if (copy_from_user(&di, arg, sizeof(di)))
2948 return -EFAULT;
2950 hdev = hci_dev_get(di.dev_id);
2951 if (!hdev)
2952 return -ENODEV;
2954 /* When the auto-off is configured it means the transport
2955 * is running, but in that case still indicate that the
2956 * device is actually down.
2958 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2959 flags = hdev->flags & ~BIT(HCI_UP);
2960 else
2961 flags = hdev->flags;
2963 strcpy(di.name, hdev->name);
2964 di.bdaddr = hdev->bdaddr;
2965 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2966 di.flags = flags;
2967 di.pkt_type = hdev->pkt_type;
2968 if (lmp_bredr_capable(hdev)) {
2969 di.acl_mtu = hdev->acl_mtu;
2970 di.acl_pkts = hdev->acl_pkts;
2971 di.sco_mtu = hdev->sco_mtu;
2972 di.sco_pkts = hdev->sco_pkts;
2973 } else {
2974 di.acl_mtu = hdev->le_mtu;
2975 di.acl_pkts = hdev->le_pkts;
2976 di.sco_mtu = 0;
2977 di.sco_pkts = 0;
2979 di.link_policy = hdev->link_policy;
2980 di.link_mode = hdev->link_mode;
2982 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2983 memcpy(&di.features, &hdev->features, sizeof(di.features));
2985 if (copy_to_user(arg, &di, sizeof(di)))
2986 err = -EFAULT;
2988 hci_dev_put(hdev);
2990 return err;
2993 /* ---- Interface to HCI drivers ---- */
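/* rfkill callback: close the device when its radio gets blocked */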
2995 static int hci_rfkill_set_block(void *data, bool blocked)
2997 struct hci_dev *hdev = data;
2999 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3001 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3002 return -EBUSY;
3004 if (blocked) {
3005 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3006 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3007 !test_bit(HCI_CONFIG, &hdev->dev_flags))
3008 hci_dev_do_close(hdev);
3009 } else {
3010 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3013 return 0;
3016 static const struct rfkill_ops hci_rfkill_ops = {
3017 .set_block = hci_rfkill_set_block,
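/* Worker behind hdev->power_on: open the device and emit the appropriate
 * mgmt events once the setup or config stage has completed */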
3020 static void hci_power_on(struct work_struct *work)
3022 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3023 int err;
3025 BT_DBG("%s", hdev->name);
3027 err = hci_dev_do_open(hdev);
3028 if (err < 0) {
3029 mgmt_set_powered_failed(hdev, err);
3030 return;
3033 /* During the HCI setup phase, a few error conditions are
3034 * ignored and they need to be checked now. If they are still
3035 * valid, it is important to turn the device back off.
3037 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3038 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3039 (hdev->dev_type == HCI_BREDR &&
3040 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3041 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3042 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3043 hci_dev_do_close(hdev);
3044 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3045 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3046 HCI_AUTO_OFF_TIMEOUT);
3049 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3050 /* For unconfigured devices, set the HCI_RAW flag
3051 * so that userspace can easily identify them.
3053 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3054 set_bit(HCI_RAW, &hdev->flags);
3056 /* For fully configured devices, this will send
3057 * the Index Added event. For unconfigured devices,
3058 		 * it will send the Unconfigured Index Added event.
3060 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3061 		 * and no event will be sent.
3063 mgmt_index_added(hdev);
3064 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3065 		/* Now that the controller is configured, it is
3066 		 * important to clear the HCI_RAW flag.
3068 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3069 clear_bit(HCI_RAW, &hdev->flags);
3071 /* Powering on the controller with HCI_CONFIG set only
3072 * happens with the transition from unconfigured to
3073 * configured. This will send the Index Added event.
3075 mgmt_index_added(hdev);
3079 static void hci_power_off(struct work_struct *work)
3081 struct hci_dev *hdev = container_of(work, struct hci_dev,
3082 power_off.work);
3084 BT_DBG("%s", hdev->name);
3086 hci_dev_do_close(hdev);
3089 static void hci_discov_off(struct work_struct *work)
3091 struct hci_dev *hdev;
3093 hdev = container_of(work, struct hci_dev, discov_off.work);
3095 BT_DBG("%s", hdev->name);
3097 mgmt_discoverable_timeout(hdev);
3100 void hci_uuids_clear(struct hci_dev *hdev)
3102 struct bt_uuid *uuid, *tmp;
3104 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3105 list_del(&uuid->list);
3106 kfree(uuid);
3110 void hci_link_keys_clear(struct hci_dev *hdev)
3112 struct list_head *p, *n;
3114 list_for_each_safe(p, n, &hdev->link_keys) {
3115 struct link_key *key;
3117 key = list_entry(p, struct link_key, list);
3119 list_del(p);
3120 kfree(key);
3124 void hci_smp_ltks_clear(struct hci_dev *hdev)
3126 struct smp_ltk *k, *tmp;
3128 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3129 list_del(&k->list);
3130 kfree(k);
3134 void hci_smp_irks_clear(struct hci_dev *hdev)
3136 struct smp_irk *k, *tmp;
3138 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3139 list_del(&k->list);
3140 kfree(k);
3144 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3146 struct link_key *k;
3148 list_for_each_entry(k, &hdev->link_keys, list)
3149 if (bacmp(bdaddr, &k->bdaddr) == 0)
3150 return k;
3152 return NULL;
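/* Decide whether a BR/EDR link key should be stored persistently */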
3155 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3156 u8 key_type, u8 old_key_type)
3158 /* Legacy key */
3159 if (key_type < 0x03)
3160 return true;
3162 /* Debug keys are insecure so don't store them persistently */
3163 if (key_type == HCI_LK_DEBUG_COMBINATION)
3164 return false;
3166 /* Changed combination key and there's no previous one */
3167 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3168 return false;
3170 /* Security mode 3 case */
3171 if (!conn)
3172 return true;
3174 /* Neither local nor remote side had no-bonding as requirement */
3175 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3176 return true;
3178 /* Local side had dedicated bonding as requirement */
3179 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3180 return true;
3182 /* Remote side had dedicated bonding as requirement */
3183 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3184 return true;
3186 /* If none of the above criteria match, then don't store the key
3187 * persistently */
3188 return false;
3191 static u8 ltk_role(u8 type)
3193 if (type == SMP_LTK)
3194 return HCI_ROLE_MASTER;
3196 return HCI_ROLE_SLAVE;
3199 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3200 u8 role)
3202 struct smp_ltk *k;
3204 list_for_each_entry(k, &hdev->long_term_keys, list) {
3205 if (k->ediv != ediv || k->rand != rand)
3206 continue;
3208 if (ltk_role(k->type) != role)
3209 continue;
3211 return k;
3214 return NULL;
3217 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3218 u8 addr_type, u8 role)
3220 struct smp_ltk *k;
3222 list_for_each_entry(k, &hdev->long_term_keys, list)
3223 if (addr_type == k->bdaddr_type &&
3224 bacmp(bdaddr, &k->bdaddr) == 0 &&
3225 ltk_role(k->type) == role)
3226 return k;
3228 return NULL;
3231 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3233 struct smp_irk *irk;
3235 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3236 if (!bacmp(&irk->rpa, rpa))
3237 return irk;
3240 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3241 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3242 bacpy(&irk->rpa, rpa);
3243 return irk;
3247 return NULL;
3250 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3251 u8 addr_type)
3253 struct smp_irk *irk;
3255 /* Identity Address must be public or static random */
3256 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3257 return NULL;
3259 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3260 if (addr_type == irk->addr_type &&
3261 bacmp(bdaddr, &irk->bdaddr) == 0)
3262 return irk;
3265 return NULL;
3268 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3269 bdaddr_t *bdaddr, u8 *val, u8 type,
3270 u8 pin_len, bool *persistent)
3272 struct link_key *key, *old_key;
3273 u8 old_key_type;
3275 old_key = hci_find_link_key(hdev, bdaddr);
3276 if (old_key) {
3277 old_key_type = old_key->type;
3278 key = old_key;
3279 } else {
3280 old_key_type = conn ? conn->key_type : 0xff;
3281 key = kzalloc(sizeof(*key), GFP_KERNEL);
3282 if (!key)
3283 return NULL;
3284 list_add(&key->list, &hdev->link_keys);
3287 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3289 /* Some buggy controller combinations generate a changed
3290 * combination key for legacy pairing even when there's no
3291 * previous key */
3292 if (type == HCI_LK_CHANGED_COMBINATION &&
3293 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3294 type = HCI_LK_COMBINATION;
3295 if (conn)
3296 conn->key_type = type;
3299 bacpy(&key->bdaddr, bdaddr);
3300 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3301 key->pin_len = pin_len;
3303 if (type == HCI_LK_CHANGED_COMBINATION)
3304 key->type = old_key_type;
3305 else
3306 key->type = type;
3308 if (persistent)
3309 *persistent = hci_persistent_key(hdev, conn, type,
3310 old_key_type);
3312 return key;
3315 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3316 u8 addr_type, u8 type, u8 authenticated,
3317 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3319 struct smp_ltk *key, *old_key;
3320 u8 role = ltk_role(type);
3322 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3323 if (old_key)
3324 key = old_key;
3325 else {
3326 key = kzalloc(sizeof(*key), GFP_KERNEL);
3327 if (!key)
3328 return NULL;
3329 list_add(&key->list, &hdev->long_term_keys);
3332 bacpy(&key->bdaddr, bdaddr);
3333 key->bdaddr_type = addr_type;
3334 memcpy(key->val, tk, sizeof(key->val));
3335 key->authenticated = authenticated;
3336 key->ediv = ediv;
3337 key->rand = rand;
3338 key->enc_size = enc_size;
3339 key->type = type;
3341 return key;
3344 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3345 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3347 struct smp_irk *irk;
3349 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3350 if (!irk) {
3351 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3352 if (!irk)
3353 return NULL;
3355 bacpy(&irk->bdaddr, bdaddr);
3356 irk->addr_type = addr_type;
3358 list_add(&irk->list, &hdev->identity_resolving_keys);
3361 memcpy(irk->val, val, 16);
3362 bacpy(&irk->rpa, rpa);
3364 return irk;
3367 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3369 struct link_key *key;
3371 key = hci_find_link_key(hdev, bdaddr);
3372 if (!key)
3373 return -ENOENT;
3375 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3377 list_del(&key->list);
3378 kfree(key);
3380 return 0;
3383 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3385 struct smp_ltk *k, *tmp;
3386 int removed = 0;
3388 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3389 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3390 continue;
3392 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3394 list_del(&k->list);
3395 kfree(k);
3396 removed++;
3399 return removed ? 0 : -ENOENT;
3402 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3404 struct smp_irk *k, *tmp;
3406 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3407 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3408 continue;
3410 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3412 list_del(&k->list);
3413 kfree(k);
3417 /* HCI command timer function */
3418 static void hci_cmd_timeout(struct work_struct *work)
3420 struct hci_dev *hdev = container_of(work, struct hci_dev,
3421 cmd_timer.work);
3423 if (hdev->sent_cmd) {
3424 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3425 u16 opcode = __le16_to_cpu(sent->opcode);
3427 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3428 } else {
3429 BT_ERR("%s command tx timeout", hdev->name);
3432 atomic_set(&hdev->cmd_cnt, 1);
3433 queue_work(hdev->workqueue, &hdev->cmd_work);
3436 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3437 bdaddr_t *bdaddr)
3439 struct oob_data *data;
3441 list_for_each_entry(data, &hdev->remote_oob_data, list)
3442 if (bacmp(bdaddr, &data->bdaddr) == 0)
3443 return data;
3445 return NULL;
3448 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3450 struct oob_data *data;
3452 data = hci_find_remote_oob_data(hdev, bdaddr);
3453 if (!data)
3454 return -ENOENT;
3456 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3458 list_del(&data->list);
3459 kfree(data);
3461 return 0;
3464 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3466 struct oob_data *data, *n;
3468 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3469 list_del(&data->list);
3470 kfree(data);
3474 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3475 u8 *hash, u8 *randomizer)
3477 struct oob_data *data;
3479 data = hci_find_remote_oob_data(hdev, bdaddr);
3480 if (!data) {
3481 data = kmalloc(sizeof(*data), GFP_KERNEL);
3482 if (!data)
3483 return -ENOMEM;
3485 bacpy(&data->bdaddr, bdaddr);
3486 list_add(&data->list, &hdev->remote_oob_data);
3489 memcpy(data->hash192, hash, sizeof(data->hash192));
3490 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3492 memset(data->hash256, 0, sizeof(data->hash256));
3493 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3495 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3497 return 0;
3500 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3501 u8 *hash192, u8 *randomizer192,
3502 u8 *hash256, u8 *randomizer256)
3504 struct oob_data *data;
3506 data = hci_find_remote_oob_data(hdev, bdaddr);
3507 if (!data) {
3508 data = kmalloc(sizeof(*data), GFP_KERNEL);
3509 if (!data)
3510 return -ENOMEM;
3512 bacpy(&data->bdaddr, bdaddr);
3513 list_add(&data->list, &hdev->remote_oob_data);
3516 memcpy(data->hash192, hash192, sizeof(data->hash192));
3517 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3519 memcpy(data->hash256, hash256, sizeof(data->hash256));
3520 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3522 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3524 return 0;
3527 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3528 bdaddr_t *bdaddr, u8 type)
3530 struct bdaddr_list *b;
3532 list_for_each_entry(b, bdaddr_list, list) {
3533 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3534 return b;
3537 return NULL;
3540 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3542 struct list_head *p, *n;
3544 list_for_each_safe(p, n, bdaddr_list) {
3545 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3547 list_del(p);
3548 kfree(b);
3552 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3554 struct bdaddr_list *entry;
3556 if (!bacmp(bdaddr, BDADDR_ANY))
3557 return -EBADF;
3559 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3560 return -EEXIST;
3562 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3563 if (!entry)
3564 return -ENOMEM;
3566 bacpy(&entry->bdaddr, bdaddr);
3567 entry->bdaddr_type = type;
3569 list_add(&entry->list, list);
3571 return 0;
3574 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3576 struct bdaddr_list *entry;
3578 if (!bacmp(bdaddr, BDADDR_ANY)) {
3579 hci_bdaddr_list_clear(list);
3580 return 0;
3583 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3584 if (!entry)
3585 return -ENOENT;
3587 list_del(&entry->list);
3588 kfree(entry);
3590 return 0;
3593 /* This function requires the caller holds hdev->lock */
3594 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3595 bdaddr_t *addr, u8 addr_type)
3597 struct hci_conn_params *params;
3599 /* The conn params list only contains identity addresses */
3600 if (!hci_is_identity_address(addr, addr_type))
3601 return NULL;
3603 list_for_each_entry(params, &hdev->le_conn_params, list) {
3604 if (bacmp(&params->addr, addr) == 0 &&
3605 params->addr_type == addr_type) {
3606 return params;
3610 return NULL;
3613 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3615 struct hci_conn *conn;
3617 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3618 if (!conn)
3619 return false;
3621 if (conn->dst_type != type)
3622 return false;
3624 if (conn->state != BT_CONNECTED)
3625 return false;
3627 return true;
3630 /* This function requires the caller holds hdev->lock */
3631 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3632 bdaddr_t *addr, u8 addr_type)
3634 struct hci_conn_params *param;
3636 /* The list only contains identity addresses */
3637 if (!hci_is_identity_address(addr, addr_type))
3638 return NULL;
3640 list_for_each_entry(param, list, action) {
3641 if (bacmp(&param->addr, addr) == 0 &&
3642 param->addr_type == addr_type)
3643 return param;
3646 return NULL;
3649 /* This function requires the caller holds hdev->lock */
3650 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3651 bdaddr_t *addr, u8 addr_type)
3653 struct hci_conn_params *params;
3655 if (!hci_is_identity_address(addr, addr_type))
3656 return NULL;
3658 params = hci_conn_params_lookup(hdev, addr, addr_type);
3659 if (params)
3660 return params;
3662 params = kzalloc(sizeof(*params), GFP_KERNEL);
3663 if (!params) {
3664 BT_ERR("Out of memory");
3665 return NULL;
3668 bacpy(&params->addr, addr);
3669 params->addr_type = addr_type;
3671 list_add(&params->list, &hdev->le_conn_params);
3672 INIT_LIST_HEAD(&params->action);
3674 params->conn_min_interval = hdev->le_conn_min_interval;
3675 params->conn_max_interval = hdev->le_conn_max_interval;
3676 params->conn_latency = hdev->le_conn_latency;
3677 params->supervision_timeout = hdev->le_supv_timeout;
3678 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3680 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3682 return params;
3685 /* This function requires the caller holds hdev->lock */
3686 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3687 u8 auto_connect)
3689 struct hci_conn_params *params;
3691 params = hci_conn_params_add(hdev, addr, addr_type);
3692 if (!params)
3693 return -EIO;
3695 if (params->auto_connect == auto_connect)
3696 return 0;
3698 list_del_init(&params->action);
3700 switch (auto_connect) {
3701 case HCI_AUTO_CONN_DISABLED:
3702 case HCI_AUTO_CONN_LINK_LOSS:
3703 hci_update_background_scan(hdev);
3704 break;
3705 case HCI_AUTO_CONN_REPORT:
3706 list_add(&params->action, &hdev->pend_le_reports);
3707 hci_update_background_scan(hdev);
3708 break;
3709 case HCI_AUTO_CONN_DIRECT:
3710 case HCI_AUTO_CONN_ALWAYS:
3711 if (!is_connected(hdev, addr, addr_type)) {
3712 list_add(&params->action, &hdev->pend_le_conns);
3713 hci_update_background_scan(hdev);
3715 break;
3718 params->auto_connect = auto_connect;
3720 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3721 auto_connect);
3723 return 0;
3726 /* This function requires the caller holds hdev->lock */
3727 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3729 struct hci_conn_params *params;
3731 params = hci_conn_params_lookup(hdev, addr, addr_type);
3732 if (!params)
3733 return;
3735 if (params->conn)
3736 hci_conn_drop(params->conn);
3738 list_del(&params->action);
3739 list_del(&params->list);
3740 kfree(params);
3742 hci_update_background_scan(hdev);
3744 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3747 /* This function requires the caller holds hdev->lock */
3748 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3750 struct hci_conn_params *params, *tmp;
3752 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3753 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3754 continue;
3755 list_del(&params->list);
3756 kfree(params);
3759 BT_DBG("All LE disabled connection parameters were removed");
3762 /* This function requires the caller holds hdev->lock */
3763 void hci_conn_params_clear_all(struct hci_dev *hdev)
3765 struct hci_conn_params *params, *tmp;
3767 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3768 if (params->conn)
3769 hci_conn_drop(params->conn);
3770 list_del(&params->action);
3771 list_del(&params->list);
3772 kfree(params);
3775 hci_update_background_scan(hdev);
3777 BT_DBG("All LE connection parameters were removed");
3780 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3782 if (status) {
3783 BT_ERR("Failed to start inquiry: status %d", status);
3785 hci_dev_lock(hdev);
3786 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3787 hci_dev_unlock(hdev);
3788 return;
3792 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3794 /* General inquiry access code (GIAC) */
3795 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3796 struct hci_request req;
3797 struct hci_cp_inquiry cp;
3798 int err;
3800 if (status) {
3801 BT_ERR("Failed to disable LE scanning: status %d", status);
3802 return;
3805 switch (hdev->discovery.type) {
3806 case DISCOV_TYPE_LE:
3807 hci_dev_lock(hdev);
3808 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3809 hci_dev_unlock(hdev);
3810 break;
3812 case DISCOV_TYPE_INTERLEAVED:
3813 hci_req_init(&req, hdev);
3815 memset(&cp, 0, sizeof(cp));
3816 memcpy(&cp.lap, lap, sizeof(cp.lap));
3817 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3818 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3820 hci_dev_lock(hdev);
3822 hci_inquiry_cache_flush(hdev);
3824 err = hci_req_run(&req, inquiry_complete);
3825 if (err) {
3826 BT_ERR("Inquiry request failed: err %d", err);
3827 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3830 hci_dev_unlock(hdev);
3831 break;
3835 static void le_scan_disable_work(struct work_struct *work)
3837 struct hci_dev *hdev = container_of(work, struct hci_dev,
3838 le_scan_disable.work);
3839 struct hci_request req;
3840 int err;
3842 BT_DBG("%s", hdev->name);
3844 hci_req_init(&req, hdev);
3846 hci_req_add_le_scan_disable(&req);
3848 err = hci_req_run(&req, le_scan_disable_work_complete);
3849 if (err)
3850 BT_ERR("Disable LE scanning request failed: err %d", err);
3853 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3855 struct hci_dev *hdev = req->hdev;
3857 /* If we're advertising or initiating an LE connection we can't
3858 * go ahead and change the random address at this time. This is
3859 * because the eventual initiator address used for the
3860 * subsequently created connection will be undefined (some
3861 * controllers use the new address and others the one we had
3862 * when the operation started).
3864 * In this kind of scenario skip the update and let the random
3865 * address be updated at the next cycle.
3867 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3868 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3869 BT_DBG("Deferring random address update");
3870 return;
3873 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
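/* Pick the own address type for an LE operation and queue any HCI
 * command needed to program the random address */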
3876 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3877 u8 *own_addr_type)
3879 struct hci_dev *hdev = req->hdev;
3880 int err;
3882 /* If privacy is enabled use a resolvable private address. If
3883 	 * the current RPA has expired or there is something other than
3884 * the current RPA in use, then generate a new one.
3886 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3887 int to;
3889 *own_addr_type = ADDR_LE_DEV_RANDOM;
3891 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3892 !bacmp(&hdev->random_addr, &hdev->rpa))
3893 return 0;
3895 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3896 if (err < 0) {
3897 BT_ERR("%s failed to generate new RPA", hdev->name);
3898 return err;
3901 set_random_addr(req, &hdev->rpa);
3903 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3904 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3906 return 0;
3909 /* In case of required privacy without resolvable private address,
3910 * use an unresolvable private address. This is useful for active
3911 * scanning and non-connectable advertising.
3913 if (require_privacy) {
3914 bdaddr_t urpa;
3916 get_random_bytes(&urpa, 6);
3917 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3919 *own_addr_type = ADDR_LE_DEV_RANDOM;
3920 set_random_addr(req, &urpa);
3921 return 0;
3924 /* If forcing static address is in use or there is no public
3925 	 * address, use the static address as random address (but skip
3926 	 * the HCI command if the current random address is already the
3927 	 * static one).
3929 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3930 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3931 *own_addr_type = ADDR_LE_DEV_RANDOM;
3932 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3933 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3934 &hdev->static_addr);
3935 return 0;
3938 /* Neither privacy nor static address is being used so use a
3939 * public address.
3941 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3943 return 0;
3946 /* Copy the Identity Address of the controller.
3948 * If the controller has a public BD_ADDR, then by default use that one.
3949 * If this is a LE only controller without a public address, default to
3950 * the static random address.
3952 * For debugging purposes it is possible to force controllers with a
3953 * public address to use the static random address instead.
3955 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3956 u8 *bdaddr_type)
3958 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3959 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3960 bacpy(bdaddr, &hdev->static_addr);
3961 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3962 } else {
3963 bacpy(bdaddr, &hdev->bdaddr);
3964 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3968 /* Alloc HCI device */
3969 struct hci_dev *hci_alloc_dev(void)
3971 struct hci_dev *hdev;
3973 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3974 if (!hdev)
3975 return NULL;
3977 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3978 hdev->esco_type = (ESCO_HV1);
3979 hdev->link_mode = (HCI_LM_ACCEPT);
3980 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3981 hdev->io_capability = 0x03; /* No Input No Output */
3982 hdev->manufacturer = 0xffff; /* Default to internal use */
3983 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3984 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3986 hdev->sniff_max_interval = 800;
3987 hdev->sniff_min_interval = 80;
3989 hdev->le_adv_channel_map = 0x07;
3990 hdev->le_adv_min_interval = 0x0800;
3991 hdev->le_adv_max_interval = 0x0800;
3992 hdev->le_scan_interval = 0x0060;
3993 hdev->le_scan_window = 0x0030;
3994 hdev->le_conn_min_interval = 0x0028;
3995 hdev->le_conn_max_interval = 0x0038;
3996 hdev->le_conn_latency = 0x0000;
3997 hdev->le_supv_timeout = 0x002a;
3999 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4000 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4001 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4002 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4004 mutex_init(&hdev->lock);
4005 mutex_init(&hdev->req_lock);
4007 INIT_LIST_HEAD(&hdev->mgmt_pending);
4008 INIT_LIST_HEAD(&hdev->blacklist);
4009 INIT_LIST_HEAD(&hdev->whitelist);
4010 INIT_LIST_HEAD(&hdev->uuids);
4011 INIT_LIST_HEAD(&hdev->link_keys);
4012 INIT_LIST_HEAD(&hdev->long_term_keys);
4013 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4014 INIT_LIST_HEAD(&hdev->remote_oob_data);
4015 INIT_LIST_HEAD(&hdev->le_white_list);
4016 INIT_LIST_HEAD(&hdev->le_conn_params);
4017 INIT_LIST_HEAD(&hdev->pend_le_conns);
4018 INIT_LIST_HEAD(&hdev->pend_le_reports);
4019 INIT_LIST_HEAD(&hdev->conn_hash.list);
4021 INIT_WORK(&hdev->rx_work, hci_rx_work);
4022 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4023 INIT_WORK(&hdev->tx_work, hci_tx_work);
4024 INIT_WORK(&hdev->power_on, hci_power_on);
4026 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4027 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4028 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4030 skb_queue_head_init(&hdev->rx_q);
4031 skb_queue_head_init(&hdev->cmd_q);
4032 skb_queue_head_init(&hdev->raw_q);
4034 init_waitqueue_head(&hdev->req_wait_q);
4036 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4038 hci_init_sysfs(hdev);
4039 discovery_init(hdev);
4041 return hdev;
4043 EXPORT_SYMBOL(hci_alloc_dev);
4045 /* Free HCI device */
4046 void hci_free_dev(struct hci_dev *hdev)
4048 /* will free via device release */
4049 put_device(&hdev->dev);
4051 EXPORT_SYMBOL(hci_free_dev);
4053 /* Register HCI device */
4054 int hci_register_dev(struct hci_dev *hdev)
4056 int id, error;
4058 if (!hdev->open || !hdev->close || !hdev->send)
4059 return -EINVAL;
4061 /* Do not allow HCI_AMP devices to register at index 0,
4062 * so the index can be used as the AMP controller ID.
4064 switch (hdev->dev_type) {
4065 case HCI_BREDR:
4066 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4067 break;
4068 case HCI_AMP:
4069 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4070 break;
4071 default:
4072 return -EINVAL;
4075 if (id < 0)
4076 return id;
4078 sprintf(hdev->name, "hci%d", id);
4079 hdev->id = id;
4081 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4083 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4084 WQ_MEM_RECLAIM, 1, hdev->name);
4085 if (!hdev->workqueue) {
4086 error = -ENOMEM;
4087 goto err;
4090 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4091 WQ_MEM_RECLAIM, 1, hdev->name);
4092 if (!hdev->req_workqueue) {
4093 destroy_workqueue(hdev->workqueue);
4094 error = -ENOMEM;
4095 goto err;
4098 if (!IS_ERR_OR_NULL(bt_debugfs))
4099 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4101 dev_set_name(&hdev->dev, "%s", hdev->name);
4103 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4104 CRYPTO_ALG_ASYNC);
4105 if (IS_ERR(hdev->tfm_aes)) {
4106 BT_ERR("Unable to create crypto context");
4107 error = PTR_ERR(hdev->tfm_aes);
4108 hdev->tfm_aes = NULL;
4109 goto err_wqueue;
4112 error = device_add(&hdev->dev);
4113 if (error < 0)
4114 goto err_tfm;
4116 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4117 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4118 hdev);
4119 if (hdev->rfkill) {
4120 if (rfkill_register(hdev->rfkill) < 0) {
4121 rfkill_destroy(hdev->rfkill);
4122 hdev->rfkill = NULL;
4126 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4127 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4129 set_bit(HCI_SETUP, &hdev->dev_flags);
4130 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4132 if (hdev->dev_type == HCI_BREDR) {
4133 /* Assume BR/EDR support until proven otherwise (such as
4134 		 * through reading supported features during init).
4136 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4139 write_lock(&hci_dev_list_lock);
4140 list_add(&hdev->list, &hci_dev_list);
4141 write_unlock(&hci_dev_list_lock);
4143 /* Devices that are marked for raw-only usage are unconfigured
4144 * and should not be included in normal operation.
4146 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4147 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4149 hci_notify(hdev, HCI_DEV_REG);
4150 hci_dev_hold(hdev);
4152 queue_work(hdev->req_workqueue, &hdev->power_on);
4154 return id;
4156 err_tfm:
4157 crypto_free_blkcipher(hdev->tfm_aes);
4158 err_wqueue:
4159 destroy_workqueue(hdev->workqueue);
4160 destroy_workqueue(hdev->req_workqueue);
4161 err:
4162 ida_simple_remove(&hci_index_ida, hdev->id);
4164 return error;
4166 EXPORT_SYMBOL(hci_register_dev);
4168 /* Unregister HCI device */
4169 void hci_unregister_dev(struct hci_dev *hdev)
4171 int i, id;
4173 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4175 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4177 id = hdev->id;
4179 write_lock(&hci_dev_list_lock);
4180 list_del(&hdev->list);
4181 write_unlock(&hci_dev_list_lock);
4183 hci_dev_do_close(hdev);
4185 for (i = 0; i < NUM_REASSEMBLY; i++)
4186 kfree_skb(hdev->reassembly[i]);
4188 cancel_work_sync(&hdev->power_on);
4190 if (!test_bit(HCI_INIT, &hdev->flags) &&
4191 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4192 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4193 hci_dev_lock(hdev);
4194 mgmt_index_removed(hdev);
4195 hci_dev_unlock(hdev);
4198 /* mgmt_index_removed should take care of emptying the
4199 * pending list */
4200 BUG_ON(!list_empty(&hdev->mgmt_pending));
4202 hci_notify(hdev, HCI_DEV_UNREG);
4204 if (hdev->rfkill) {
4205 rfkill_unregister(hdev->rfkill);
4206 rfkill_destroy(hdev->rfkill);
4209 if (hdev->tfm_aes)
4210 crypto_free_blkcipher(hdev->tfm_aes);
4212 device_del(&hdev->dev);
4214 debugfs_remove_recursive(hdev->debugfs);
4216 destroy_workqueue(hdev->workqueue);
4217 destroy_workqueue(hdev->req_workqueue);
4219 hci_dev_lock(hdev);
4220 hci_bdaddr_list_clear(&hdev->blacklist);
4221 hci_bdaddr_list_clear(&hdev->whitelist);
4222 hci_uuids_clear(hdev);
4223 hci_link_keys_clear(hdev);
4224 hci_smp_ltks_clear(hdev);
4225 hci_smp_irks_clear(hdev);
4226 hci_remote_oob_data_clear(hdev);
4227 hci_bdaddr_list_clear(&hdev->le_white_list);
4228 hci_conn_params_clear_all(hdev);
4229 hci_dev_unlock(hdev);
4231 hci_dev_put(hdev);
4233 ida_simple_remove(&hci_index_ida, id);
4235 EXPORT_SYMBOL(hci_unregister_dev);
4237 /* Suspend HCI device */
4238 int hci_suspend_dev(struct hci_dev *hdev)
4240 hci_notify(hdev, HCI_DEV_SUSPEND);
4241 return 0;
4243 EXPORT_SYMBOL(hci_suspend_dev);
4245 /* Resume HCI device */
4246 int hci_resume_dev(struct hci_dev *hdev)
4248 hci_notify(hdev, HCI_DEV_RESUME);
4249 return 0;
4251 EXPORT_SYMBOL(hci_resume_dev);
4253 /* Receive frame from HCI drivers */
4254 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4256 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4257 && !test_bit(HCI_INIT, &hdev->flags))) {
4258 kfree_skb(skb);
4259 return -ENXIO;
4262 /* Incoming skb */
4263 bt_cb(skb)->incoming = 1;
4265 /* Time stamp */
4266 __net_timestamp(skb);
4268 skb_queue_tail(&hdev->rx_q, skb);
4269 queue_work(hdev->workqueue, &hdev->rx_work);
4271 return 0;
4273 EXPORT_SYMBOL(hci_recv_frame);
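/* Reassemble a fragmented HCI packet of the given type into the
 * per-index reassembly buffer; returns the number of unconsumed bytes */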
4275 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4276 int count, __u8 index)
4278 int len = 0;
4279 int hlen = 0;
4280 int remain = count;
4281 struct sk_buff *skb;
4282 struct bt_skb_cb *scb;
4284 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4285 index >= NUM_REASSEMBLY)
4286 return -EILSEQ;
4288 skb = hdev->reassembly[index];
4290 if (!skb) {
4291 switch (type) {
4292 case HCI_ACLDATA_PKT:
4293 len = HCI_MAX_FRAME_SIZE;
4294 hlen = HCI_ACL_HDR_SIZE;
4295 break;
4296 case HCI_EVENT_PKT:
4297 len = HCI_MAX_EVENT_SIZE;
4298 hlen = HCI_EVENT_HDR_SIZE;
4299 break;
4300 case HCI_SCODATA_PKT:
4301 len = HCI_MAX_SCO_SIZE;
4302 hlen = HCI_SCO_HDR_SIZE;
4303 break;
4306 skb = bt_skb_alloc(len, GFP_ATOMIC);
4307 if (!skb)
4308 return -ENOMEM;
4310 scb = (void *) skb->cb;
4311 scb->expect = hlen;
4312 scb->pkt_type = type;
4314 hdev->reassembly[index] = skb;
4317 while (count) {
4318 scb = (void *) skb->cb;
4319 len = min_t(uint, scb->expect, count);
4321 memcpy(skb_put(skb, len), data, len);
4323 count -= len;
4324 data += len;
4325 scb->expect -= len;
4326 remain = count;
4328 switch (type) {
4329 case HCI_EVENT_PKT:
4330 if (skb->len == HCI_EVENT_HDR_SIZE) {
4331 struct hci_event_hdr *h = hci_event_hdr(skb);
4332 scb->expect = h->plen;
4334 if (skb_tailroom(skb) < scb->expect) {
4335 kfree_skb(skb);
4336 hdev->reassembly[index] = NULL;
4337 return -ENOMEM;
4340 break;
4342 case HCI_ACLDATA_PKT:
4343 if (skb->len == HCI_ACL_HDR_SIZE) {
4344 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4345 scb->expect = __le16_to_cpu(h->dlen);
4347 if (skb_tailroom(skb) < scb->expect) {
4348 kfree_skb(skb);
4349 hdev->reassembly[index] = NULL;
4350 return -ENOMEM;
4353 break;
4355 case HCI_SCODATA_PKT:
4356 if (skb->len == HCI_SCO_HDR_SIZE) {
4357 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4358 scb->expect = h->dlen;
4360 if (skb_tailroom(skb) < scb->expect) {
4361 kfree_skb(skb);
4362 hdev->reassembly[index] = NULL;
4363 return -ENOMEM;
4366 break;
4369 if (scb->expect == 0) {
4370 /* Complete frame */
4372 bt_cb(skb)->pkt_type = type;
4373 hci_recv_frame(hdev, skb);
4375 hdev->reassembly[index] = NULL;
4376 return remain;
4380 return remain;
4383 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4385 int rem = 0;
4387 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4388 return -EILSEQ;
4390 while (count) {
4391 rem = hci_reassembly(hdev, type, data, count, type - 1);
4392 if (rem < 0)
4393 return rem;
4395 data += (count - rem);
4396 count = rem;
4399 return rem;
4401 EXPORT_SYMBOL(hci_recv_fragment);
4403 #define STREAM_REASSEMBLY 0
4405 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4407 int type;
4408 int rem = 0;
4410 while (count) {
4411 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4413 if (!skb) {
4414 struct { char type; } *pkt;
4416 /* Start of the frame */
4417 pkt = data;
4418 type = pkt->type;
4420 data++;
4421 count--;
4422 } else
4423 type = bt_cb(skb)->pkt_type;
4425 rem = hci_reassembly(hdev, type, data, count,
4426 STREAM_REASSEMBLY);
4427 if (rem < 0)
4428 return rem;
4430 data += (count - rem);
4431 count = rem;
4434 return rem;
4436 EXPORT_SYMBOL(hci_recv_stream_fragment);
4438 /* ---- Interface to upper protocols ---- */
4440 int hci_register_cb(struct hci_cb *cb)
4442 BT_DBG("%p name %s", cb, cb->name);
4444 write_lock(&hci_cb_list_lock);
4445 list_add(&cb->list, &hci_cb_list);
4446 write_unlock(&hci_cb_list_lock);
4448 return 0;
4450 EXPORT_SYMBOL(hci_register_cb);
4452 int hci_unregister_cb(struct hci_cb *cb)
4454 BT_DBG("%p name %s", cb, cb->name);
4456 write_lock(&hci_cb_list_lock);
4457 list_del(&cb->list);
4458 write_unlock(&hci_cb_list_lock);
4460 return 0;
4462 EXPORT_SYMBOL(hci_unregister_cb);
4464 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4466 int err;
4468 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4470 /* Time stamp */
4471 __net_timestamp(skb);
4473 /* Send copy to monitor */
4474 hci_send_to_monitor(hdev, skb);
4476 if (atomic_read(&hdev->promisc)) {
4477 /* Send copy to the sockets */
4478 hci_send_to_sock(hdev, skb);
4481 /* Get rid of skb owner, prior to sending to the driver. */
4482 skb_orphan(skb);
4484 err = hdev->send(hdev, skb);
4485 if (err < 0) {
4486 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4487 kfree_skb(skb);
4491 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4493 skb_queue_head_init(&req->cmd_q);
4494 req->hdev = hdev;
4495 req->err = 0;
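/* Submit all commands queued on the request to the controller and
 * attach the completion callback to the last one */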
4498 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4500 struct hci_dev *hdev = req->hdev;
4501 struct sk_buff *skb;
4502 unsigned long flags;
4504 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4506 	/* If an error occurred during request building, remove all HCI
4507 * commands queued on the HCI request queue.
4509 if (req->err) {
4510 skb_queue_purge(&req->cmd_q);
4511 return req->err;
4514 /* Do not allow empty requests */
4515 if (skb_queue_empty(&req->cmd_q))
4516 return -ENODATA;
4518 skb = skb_peek_tail(&req->cmd_q);
4519 bt_cb(skb)->req.complete = complete;
4521 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4522 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4523 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4525 queue_work(hdev->workqueue, &hdev->cmd_work);
4527 return 0;
4530 bool hci_req_pending(struct hci_dev *hdev)
4532 return (hdev->req_status == HCI_REQ_PEND);
4535 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4536 u32 plen, const void *param)
4538 int len = HCI_COMMAND_HDR_SIZE + plen;
4539 struct hci_command_hdr *hdr;
4540 struct sk_buff *skb;
4542 skb = bt_skb_alloc(len, GFP_ATOMIC);
4543 if (!skb)
4544 return NULL;
4546 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4547 hdr->opcode = cpu_to_le16(opcode);
4548 hdr->plen = plen;
4550 if (plen)
4551 memcpy(skb_put(skb, plen), param, plen);
4553 BT_DBG("skb len %d", skb->len);
4555 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4557 return skb;
4560 /* Send HCI command */
4561 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4562 const void *param)
4564 struct sk_buff *skb;
4566 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4568 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4569 if (!skb) {
4570 BT_ERR("%s no memory for command", hdev->name);
4571 return -ENOMEM;
4574 /* Stand-alone HCI commands must be flagged as
4575 * single-command requests.
4577 bt_cb(skb)->req.start = true;
4579 skb_queue_tail(&hdev->cmd_q, skb);
4580 queue_work(hdev->workqueue, &hdev->cmd_work);
4582 return 0;
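/* A minimal sketch (hypothetical helper, not part of this file): sending a
 * stand-alone HCI_Reset command. hci_send_cmd() only queues the command on
 * hdev->cmd_q; the command work item pushes it to the driver once the
 * controller has a command credit available.
 */
static int my_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}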
4585 /* Queue a command to an asynchronous HCI request */
4586 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4587 const void *param, u8 event)
4589 struct hci_dev *hdev = req->hdev;
4590 struct sk_buff *skb;
4592 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4594 /* If an error occurred during request building, there is no point in
4595 * queueing the HCI command. We can simply return.
4597 if (req->err)
4598 return;
4600 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4601 if (!skb) {
4602 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4603 hdev->name, opcode);
4604 req->err = -ENOMEM;
4605 return;
4608 if (skb_queue_empty(&req->cmd_q))
4609 bt_cb(skb)->req.start = true;
4611 bt_cb(skb)->req.event = event;
4613 skb_queue_tail(&req->cmd_q, skb);
4616 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4617 const void *param)
4619 hci_req_add_ev(req, opcode, plen, param, 0);
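/* A minimal sketch (hypothetical request builder, not part of this file):
 * queue one command into a request and run it asynchronously. The complete
 * callback is attached to the last command in the request and fires once
 * that command has completed.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int my_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return hci_req_run(&req, my_req_complete);
}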
4622 /* Get data from the previously sent command */
4623 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4625 struct hci_command_hdr *hdr;
4627 if (!hdev->sent_cmd)
4628 return NULL;
4630 hdr = (void *) hdev->sent_cmd->data;
4632 if (hdr->opcode != cpu_to_le16(opcode))
4633 return NULL;
4635 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4637 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4640 /* Send ACL data */
4641 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4643 struct hci_acl_hdr *hdr;
4644 int len = skb->len;
4646 skb_push(skb, HCI_ACL_HDR_SIZE);
4647 skb_reset_transport_header(skb);
4648 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4649 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4650 hdr->dlen = cpu_to_le16(len);
4653 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4654 struct sk_buff *skb, __u16 flags)
4656 struct hci_conn *conn = chan->conn;
4657 struct hci_dev *hdev = conn->hdev;
4658 struct sk_buff *list;
4660 skb->len = skb_headlen(skb);
4661 skb->data_len = 0;
4663 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4665 switch (hdev->dev_type) {
4666 case HCI_BREDR:
4667 hci_add_acl_hdr(skb, conn->handle, flags);
4668 break;
4669 case HCI_AMP:
4670 hci_add_acl_hdr(skb, chan->handle, flags);
4671 break;
4672 default:
4673 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4674 return;
4677 list = skb_shinfo(skb)->frag_list;
4678 if (!list) {
4679 /* Non fragmented */
4680 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4682 skb_queue_tail(queue, skb);
4683 } else {
4684 /* Fragmented */
4685 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4687 skb_shinfo(skb)->frag_list = NULL;
4689 /* Queue all fragments atomically */
4690 spin_lock(&queue->lock);
4692 __skb_queue_tail(queue, skb);
4694 flags &= ~ACL_START;
4695 flags |= ACL_CONT;
4696 do {
4697 skb = list; list = list->next;
4699 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4700 hci_add_acl_hdr(skb, conn->handle, flags);
4702 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4704 __skb_queue_tail(queue, skb);
4705 } while (list);
4707 spin_unlock(&queue->lock);
4711 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4713 struct hci_dev *hdev = chan->conn->hdev;
4715 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4717 hci_queue_acl(chan, &chan->data_q, skb, flags);
4719 queue_work(hdev->workqueue, &hdev->tx_work);
4722 /* Send SCO data */
4723 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4725 struct hci_dev *hdev = conn->hdev;
4726 struct hci_sco_hdr hdr;
4728 BT_DBG("%s len %d", hdev->name, skb->len);
4730 hdr.handle = cpu_to_le16(conn->handle);
4731 hdr.dlen = skb->len;
4733 skb_push(skb, HCI_SCO_HDR_SIZE);
4734 skb_reset_transport_header(skb);
4735 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4737 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4739 skb_queue_tail(&conn->data_q, skb);
4740 queue_work(hdev->workqueue, &hdev->tx_work);
4743 /* ---- HCI TX task (outgoing data) ---- */
4745 /* HCI Connection scheduler */
4746 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4747 int *quote)
4749 struct hci_conn_hash *h = &hdev->conn_hash;
4750 struct hci_conn *conn = NULL, *c;
4751 unsigned int num = 0, min = ~0;
4753 /* We don't have to lock device here. Connections are always
4754 * added and removed with TX task disabled. */
4756 rcu_read_lock();
4758 list_for_each_entry_rcu(c, &h->list, list) {
4759 if (c->type != type || skb_queue_empty(&c->data_q))
4760 continue;
4762 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4763 continue;
4765 num++;
4767 if (c->sent < min) {
4768 min = c->sent;
4769 conn = c;
4772 if (hci_conn_num(hdev, type) == num)
4773 break;
4776 rcu_read_unlock();
4778 if (conn) {
4779 int cnt, q;
4781 switch (conn->type) {
4782 case ACL_LINK:
4783 cnt = hdev->acl_cnt;
4784 break;
4785 case SCO_LINK:
4786 case ESCO_LINK:
4787 cnt = hdev->sco_cnt;
4788 break;
4789 case LE_LINK:
4790 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4791 break;
4792 default:
4793 cnt = 0;
4794 BT_ERR("Unknown link type");
4797 q = cnt / num;
4798 *quote = q ? q : 1;
4799 } else
4800 *quote = 0;
4802 BT_DBG("conn %p quote %d", conn, *quote);
4803 return conn;
4806 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4808 struct hci_conn_hash *h = &hdev->conn_hash;
4809 struct hci_conn *c;
4811 BT_ERR("%s link tx timeout", hdev->name);
4813 rcu_read_lock();
4815 /* Kill stalled connections */
4816 list_for_each_entry_rcu(c, &h->list, list) {
4817 if (c->type == type && c->sent) {
4818 BT_ERR("%s killing stalled connection %pMR",
4819 hdev->name, &c->dst);
4820 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4824 rcu_read_unlock();
4827 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4828 int *quote)
4830 struct hci_conn_hash *h = &hdev->conn_hash;
4831 struct hci_chan *chan = NULL;
4832 unsigned int num = 0, min = ~0, cur_prio = 0;
4833 struct hci_conn *conn;
4834 int cnt, q, conn_num = 0;
4836 BT_DBG("%s", hdev->name);
4838 rcu_read_lock();
4840 list_for_each_entry_rcu(conn, &h->list, list) {
4841 struct hci_chan *tmp;
4843 if (conn->type != type)
4844 continue;
4846 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4847 continue;
4849 conn_num++;
4851 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4852 struct sk_buff *skb;
4854 if (skb_queue_empty(&tmp->data_q))
4855 continue;
4857 skb = skb_peek(&tmp->data_q);
4858 if (skb->priority < cur_prio)
4859 continue;
4861 if (skb->priority > cur_prio) {
4862 num = 0;
4863 min = ~0;
4864 cur_prio = skb->priority;
4867 num++;
4869 if (conn->sent < min) {
4870 min = conn->sent;
4871 chan = tmp;
4875 if (hci_conn_num(hdev, type) == conn_num)
4876 break;
4879 rcu_read_unlock();
4881 if (!chan)
4882 return NULL;
4884 switch (chan->conn->type) {
4885 case ACL_LINK:
4886 cnt = hdev->acl_cnt;
4887 break;
4888 case AMP_LINK:
4889 cnt = hdev->block_cnt;
4890 break;
4891 case SCO_LINK:
4892 case ESCO_LINK:
4893 cnt = hdev->sco_cnt;
4894 break;
4895 case LE_LINK:
4896 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4897 break;
4898 default:
4899 cnt = 0;
4900 BT_ERR("Unknown link type");
4903 q = cnt / num;
4904 *quote = q ? q : 1;
4905 BT_DBG("chan %p quote %d", chan, *quote);
4906 return chan;
4909 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4911 struct hci_conn_hash *h = &hdev->conn_hash;
4912 struct hci_conn *conn;
4913 int num = 0;
4915 BT_DBG("%s", hdev->name);
4917 rcu_read_lock();
4919 list_for_each_entry_rcu(conn, &h->list, list) {
4920 struct hci_chan *chan;
4922 if (conn->type != type)
4923 continue;
4925 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4926 continue;
4928 num++;
4930 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4931 struct sk_buff *skb;
4933 if (chan->sent) {
4934 chan->sent = 0;
4935 continue;
4938 if (skb_queue_empty(&chan->data_q))
4939 continue;
4941 skb = skb_peek(&chan->data_q);
4942 if (skb->priority >= HCI_PRIO_MAX - 1)
4943 continue;
4945 skb->priority = HCI_PRIO_MAX - 1;
4947 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4948 skb->priority);
4951 if (hci_conn_num(hdev, type) == num)
4952 break;
4955 rcu_read_unlock();
4959 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4961 /* Calculate count of blocks used by this packet */
4962 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
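/* For example, with hdev->block_len == 64 an ACL frame of
 * HCI_ACL_HDR_SIZE + 300 bytes occupies DIV_ROUND_UP(300, 64) == 5
 * controller buffer blocks.
 */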
4965 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4967 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4968 /* ACL tx timeout must be longer than maximum
4969 * link supervision timeout (40.9 seconds) */
4970 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4971 HCI_ACL_TX_TIMEOUT))
4972 hci_link_tx_to(hdev, ACL_LINK);
4976 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4978 unsigned int cnt = hdev->acl_cnt;
4979 struct hci_chan *chan;
4980 struct sk_buff *skb;
4981 int quote;
4983 __check_timeout(hdev, cnt);
4985 while (hdev->acl_cnt &&
4986 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4987 u32 priority = (skb_peek(&chan->data_q))->priority;
4988 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4989 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4990 skb->len, skb->priority);
4992 /* Stop if priority has changed */
4993 if (skb->priority < priority)
4994 break;
4996 skb = skb_dequeue(&chan->data_q);
4998 hci_conn_enter_active_mode(chan->conn,
4999 bt_cb(skb)->force_active);
5001 hci_send_frame(hdev, skb);
5002 hdev->acl_last_tx = jiffies;
5004 hdev->acl_cnt--;
5005 chan->sent++;
5006 chan->conn->sent++;
5010 if (cnt != hdev->acl_cnt)
5011 hci_prio_recalculate(hdev, ACL_LINK);
5014 static void hci_sched_acl_blk(struct hci_dev *hdev)
5016 unsigned int cnt = hdev->block_cnt;
5017 struct hci_chan *chan;
5018 struct sk_buff *skb;
5019 int quote;
5020 u8 type;
5022 __check_timeout(hdev, cnt);
5024 BT_DBG("%s", hdev->name);
5026 if (hdev->dev_type == HCI_AMP)
5027 type = AMP_LINK;
5028 else
5029 type = ACL_LINK;
5031 while (hdev->block_cnt > 0 &&
5032 (chan = hci_chan_sent(hdev, type, &quote))) {
5033 u32 priority = (skb_peek(&chan->data_q))->priority;
5034 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5035 int blocks;
5037 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5038 skb->len, skb->priority);
5040 /* Stop if priority has changed */
5041 if (skb->priority < priority)
5042 break;
5044 skb = skb_dequeue(&chan->data_q);
5046 blocks = __get_blocks(hdev, skb);
5047 if (blocks > hdev->block_cnt)
5048 return;
5050 hci_conn_enter_active_mode(chan->conn,
5051 bt_cb(skb)->force_active);
5053 hci_send_frame(hdev, skb);
5054 hdev->acl_last_tx = jiffies;
5056 hdev->block_cnt -= blocks;
5057 quote -= blocks;
5059 chan->sent += blocks;
5060 chan->conn->sent += blocks;
5064 if (cnt != hdev->block_cnt)
5065 hci_prio_recalculate(hdev, type);
5068 static void hci_sched_acl(struct hci_dev *hdev)
5070 BT_DBG("%s", hdev->name);
5072 /* No ACL link over BR/EDR controller */
5073 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5074 return;
5076 /* No AMP link over AMP controller */
5077 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5078 return;
5080 switch (hdev->flow_ctl_mode) {
5081 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5082 hci_sched_acl_pkt(hdev);
5083 break;
5085 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5086 hci_sched_acl_blk(hdev);
5087 break;
5091 /* Schedule SCO */
5092 static void hci_sched_sco(struct hci_dev *hdev)
5094 struct hci_conn *conn;
5095 struct sk_buff *skb;
5096 int quote;
5098 BT_DBG("%s", hdev->name);
5100 if (!hci_conn_num(hdev, SCO_LINK))
5101 return;
5103 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5104 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5105 BT_DBG("skb %p len %d", skb, skb->len);
5106 hci_send_frame(hdev, skb);
5108 conn->sent++;
5109 if (conn->sent == ~0)
5110 conn->sent = 0;
5115 static void hci_sched_esco(struct hci_dev *hdev)
5117 struct hci_conn *conn;
5118 struct sk_buff *skb;
5119 int quote;
5121 BT_DBG("%s", hdev->name);
5123 if (!hci_conn_num(hdev, ESCO_LINK))
5124 return;
5126 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5127 &quote))) {
5128 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5129 BT_DBG("skb %p len %d", skb, skb->len);
5130 hci_send_frame(hdev, skb);
5132 conn->sent++;
5133 if (conn->sent == ~0)
5134 conn->sent = 0;
5139 static void hci_sched_le(struct hci_dev *hdev)
5141 struct hci_chan *chan;
5142 struct sk_buff *skb;
5143 int quote, cnt, tmp;
5145 BT_DBG("%s", hdev->name);
5147 if (!hci_conn_num(hdev, LE_LINK))
5148 return;
5150 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5151 /* LE tx timeout must be longer than maximum
5152 * link supervision timeout (40.9 seconds) */
5153 if (!hdev->le_cnt && hdev->le_pkts &&
5154 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5155 hci_link_tx_to(hdev, LE_LINK);
5158 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5159 tmp = cnt;
5160 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5161 u32 priority = (skb_peek(&chan->data_q))->priority;
5162 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5163 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5164 skb->len, skb->priority);
5166 /* Stop if priority has changed */
5167 if (skb->priority < priority)
5168 break;
5170 skb = skb_dequeue(&chan->data_q);
5172 hci_send_frame(hdev, skb);
5173 hdev->le_last_tx = jiffies;
5175 cnt--;
5176 chan->sent++;
5177 chan->conn->sent++;
5181 if (hdev->le_pkts)
5182 hdev->le_cnt = cnt;
5183 else
5184 hdev->acl_cnt = cnt;
5186 if (cnt != tmp)
5187 hci_prio_recalculate(hdev, LE_LINK);
5190 static void hci_tx_work(struct work_struct *work)
5192 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5193 struct sk_buff *skb;
5195 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5196 hdev->sco_cnt, hdev->le_cnt);
5198 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5199 /* Schedule queues and send stuff to HCI driver */
5200 hci_sched_acl(hdev);
5201 hci_sched_sco(hdev);
5202 hci_sched_esco(hdev);
5203 hci_sched_le(hdev);
5206 /* Send next queued raw (unknown type) packet */
5207 while ((skb = skb_dequeue(&hdev->raw_q)))
5208 hci_send_frame(hdev, skb);
5211 /* ----- HCI RX task (incoming data processing) ----- */
5213 /* ACL data packet */
5214 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5216 struct hci_acl_hdr *hdr = (void *) skb->data;
5217 struct hci_conn *conn;
5218 __u16 handle, flags;
5220 skb_pull(skb, HCI_ACL_HDR_SIZE);
5222 handle = __le16_to_cpu(hdr->handle);
5223 flags = hci_flags(handle);
5224 handle = hci_handle(handle);
5226 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5227 handle, flags);
5229 hdev->stat.acl_rx++;
5231 hci_dev_lock(hdev);
5232 conn = hci_conn_hash_lookup_handle(hdev, handle);
5233 hci_dev_unlock(hdev);
5235 if (conn) {
5236 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5238 /* Send to upper protocol */
5239 l2cap_recv_acldata(conn, skb, flags);
5240 return;
5241 } else {
5242 BT_ERR("%s ACL packet for unknown connection handle %d",
5243 hdev->name, handle);
5246 kfree_skb(skb);
5249 /* SCO data packet */
5250 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5252 struct hci_sco_hdr *hdr = (void *) skb->data;
5253 struct hci_conn *conn;
5254 __u16 handle;
5256 skb_pull(skb, HCI_SCO_HDR_SIZE);
5258 handle = __le16_to_cpu(hdr->handle);
5260 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5262 hdev->stat.sco_rx++;
5264 hci_dev_lock(hdev);
5265 conn = hci_conn_hash_lookup_handle(hdev, handle);
5266 hci_dev_unlock(hdev);
5268 if (conn) {
5269 /* Send to upper protocol */
5270 sco_recv_scodata(conn, skb);
5271 return;
5272 } else {
5273 BT_ERR("%s SCO packet for unknown connection handle %d",
5274 hdev->name, handle);
5277 kfree_skb(skb);
5280 static bool hci_req_is_complete(struct hci_dev *hdev)
5282 struct sk_buff *skb;
5284 skb = skb_peek(&hdev->cmd_q);
5285 if (!skb)
5286 return true;
5288 return bt_cb(skb)->req.start;
5291 static void hci_resend_last(struct hci_dev *hdev)
5293 struct hci_command_hdr *sent;
5294 struct sk_buff *skb;
5295 u16 opcode;
5297 if (!hdev->sent_cmd)
5298 return;
5300 sent = (void *) hdev->sent_cmd->data;
5301 opcode = __le16_to_cpu(sent->opcode);
5302 if (opcode == HCI_OP_RESET)
5303 return;
5305 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5306 if (!skb)
5307 return;
5309 skb_queue_head(&hdev->cmd_q, skb);
5310 queue_work(hdev->workqueue, &hdev->cmd_work);
5313 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5315 hci_req_complete_t req_complete = NULL;
5316 struct sk_buff *skb;
5317 unsigned long flags;
5319 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5321 /* If the completed command doesn't match the last one that was
5322 * sent, we need to do special handling of it.
5324 if (!hci_sent_cmd_data(hdev, opcode)) {
5325 /* Some CSR based controllers generate a spontaneous
5326 * reset complete event during init and any pending
5327 * command will never be completed. In such a case we
5328 * need to resend whatever was the last sent
5329 * command.
5331 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5332 hci_resend_last(hdev);
5334 return;
5337 /* If the command succeeded and there are still more commands in
5338 * this request, the request is not yet complete.
5340 if (!status && !hci_req_is_complete(hdev))
5341 return;
5343 /* If this was the last command in a request, the complete
5344 * callback would be found in hdev->sent_cmd instead of the
5345 * command queue (hdev->cmd_q).
5347 if (hdev->sent_cmd) {
5348 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5350 if (req_complete) {
5351 /* We must set the complete callback to NULL to
5352 * avoid calling the callback more than once if
5353 * this function gets called again.
5355 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5357 goto call_complete;
5361 /* Remove all pending commands belonging to this request */
5362 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5363 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5364 if (bt_cb(skb)->req.start) {
5365 __skb_queue_head(&hdev->cmd_q, skb);
5366 break;
5369 req_complete = bt_cb(skb)->req.complete;
5370 kfree_skb(skb);
5372 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5374 call_complete:
5375 if (req_complete)
5376 req_complete(hdev, status);
5379 static void hci_rx_work(struct work_struct *work)
5381 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5382 struct sk_buff *skb;
5384 BT_DBG("%s", hdev->name);
5386 while ((skb = skb_dequeue(&hdev->rx_q))) {
5387 /* Send copy to monitor */
5388 hci_send_to_monitor(hdev, skb);
5390 if (atomic_read(&hdev->promisc)) {
5391 /* Send copy to the sockets */
5392 hci_send_to_sock(hdev, skb);
5395 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5396 kfree_skb(skb);
5397 continue;
5400 if (test_bit(HCI_INIT, &hdev->flags)) {
5401 /* Don't process data packets in this state. */
5402 switch (bt_cb(skb)->pkt_type) {
5403 case HCI_ACLDATA_PKT:
5404 case HCI_SCODATA_PKT:
5405 kfree_skb(skb);
5406 continue;
5410 /* Process frame */
5411 switch (bt_cb(skb)->pkt_type) {
5412 case HCI_EVENT_PKT:
5413 BT_DBG("%s Event packet", hdev->name);
5414 hci_event_packet(hdev, skb);
5415 break;
5417 case HCI_ACLDATA_PKT:
5418 BT_DBG("%s ACL data packet", hdev->name);
5419 hci_acldata_packet(hdev, skb);
5420 break;
5422 case HCI_SCODATA_PKT:
5423 BT_DBG("%s SCO data packet", hdev->name);
5424 hci_scodata_packet(hdev, skb);
5425 break;
5427 default:
5428 kfree_skb(skb);
5429 break;
5434 static void hci_cmd_work(struct work_struct *work)
5436 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5437 struct sk_buff *skb;
5439 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5440 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5442 /* Send queued commands */
5443 if (atomic_read(&hdev->cmd_cnt)) {
5444 skb = skb_dequeue(&hdev->cmd_q);
5445 if (!skb)
5446 return;
5448 kfree_skb(hdev->sent_cmd);
5450 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5451 if (hdev->sent_cmd) {
5452 atomic_dec(&hdev->cmd_cnt);
5453 hci_send_frame(hdev, skb);
5454 if (test_bit(HCI_RESET, &hdev->flags))
5455 cancel_delayed_work(&hdev->cmd_timer);
5456 else
5457 schedule_delayed_work(&hdev->cmd_timer,
5458 HCI_CMD_TIMEOUT);
5459 } else {
5460 skb_queue_head(&hdev->cmd_q, skb);
5461 queue_work(hdev->workqueue, &hdev->cmd_work);
5466 void hci_req_add_le_scan_disable(struct hci_request *req)
5468 struct hci_cp_le_set_scan_enable cp;
5470 memset(&cp, 0, sizeof(cp));
5471 cp.enable = LE_SCAN_DISABLE;
5472 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5475 static void add_to_white_list(struct hci_request *req,
5476 struct hci_conn_params *params)
5478 struct hci_cp_le_add_to_white_list cp;
5480 cp.bdaddr_type = params->addr_type;
5481 bacpy(&cp.bdaddr, &params->addr);
5483 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5486 static u8 update_white_list(struct hci_request *req)
5488 struct hci_dev *hdev = req->hdev;
5489 struct hci_conn_params *params;
5490 struct bdaddr_list *b;
5491 u8 white_list_entries = 0;
5493 /* Go through the current white list programmed into the
5494 * controller one by one and check if that address is still
5495 * in the list of pending connections or list of devices to
5496 * report. If not present in either list, then queue the
5497 * command to remove it from the controller.
5499 list_for_each_entry(b, &hdev->le_white_list, list) {
5500 struct hci_cp_le_del_from_white_list cp;
5502 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5503 &b->bdaddr, b->bdaddr_type) ||
5504 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5505 &b->bdaddr, b->bdaddr_type)) {
5506 white_list_entries++;
5507 continue;
5510 cp.bdaddr_type = b->bdaddr_type;
5511 bacpy(&cp.bdaddr, &b->bdaddr);
5513 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5514 sizeof(cp), &cp);
5517 /* Since all white list entries that are no longer valid have been
5518 * removed, walk through the list of pending connections
5519 * and ensure that any new device gets programmed into
5520 * the controller.
5522 * If the list of the devices is larger than the list of
5523 * available white list entries in the controller, then
5524 * just abort and return the filter policy value to not use the
5525 * white list.
5527 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5528 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5529 &params->addr, params->addr_type))
5530 continue;
5532 if (white_list_entries >= hdev->le_white_list_size) {
5533 /* Select filter policy to accept all advertising */
5534 return 0x00;
5537 if (hci_find_irk_by_addr(hdev, &params->addr,
5538 params->addr_type)) {
5539 /* White list can not be used with RPAs */
5540 return 0x00;
5543 white_list_entries++;
5544 add_to_white_list(req, params);
5547 /* After adding all new pending connections, walk through
5548 * the list of pending reports and also add these to the
5549 * white list if there is still space.
5551 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5552 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5553 &params->addr, params->addr_type))
5554 continue;
5556 if (white_list_entries >= hdev->le_white_list_size) {
5557 /* Select filter policy to accept all advertising */
5558 return 0x00;
5561 if (hci_find_irk_by_addr(hdev, &params->addr,
5562 params->addr_type)) {
5563 /* White list can not be used with RPAs */
5564 return 0x00;
5567 white_list_entries++;
5568 add_to_white_list(req, params);
5571 /* Select filter policy to use white list */
5572 return 0x01;
5575 void hci_req_add_le_passive_scan(struct hci_request *req)
5577 struct hci_cp_le_set_scan_param param_cp;
5578 struct hci_cp_le_set_scan_enable enable_cp;
5579 struct hci_dev *hdev = req->hdev;
5580 u8 own_addr_type;
5581 u8 filter_policy;
5583 /* Set require_privacy to false since no SCAN_REQ are sent
5584 * during passive scanning. Not using an unresolvable address
5585 * here is important so that peer devices using direct
5586 * advertising with our address will be correctly reported
5587 * by the controller.
5589 if (hci_update_random_address(req, false, &own_addr_type))
5590 return;
5592 /* Adding or removing entries from the white list must
5593 * happen before enabling scanning. The controller does
5594 * not allow white list modification while scanning.
5596 filter_policy = update_white_list(req);
5598 memset(&param_cp, 0, sizeof(param_cp));
5599 param_cp.type = LE_SCAN_PASSIVE;
5600 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5601 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5602 param_cp.own_address_type = own_addr_type;
5603 param_cp.filter_policy = filter_policy;
5604 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5605 &param_cp);
5607 memset(&enable_cp, 0, sizeof(enable_cp));
5608 enable_cp.enable = LE_SCAN_ENABLE;
5609 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5610 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5611 &enable_cp);
5614 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5616 if (status)
5617 BT_DBG("HCI request failed to update background scanning: "
5618 "status 0x%2.2x", status);
5621 /* This function controls the background scanning based on hdev->pend_le_conns
5622 * list. If there are pending LE connections we start the background scanning,
5623 * otherwise we stop it.
5625 * This function requires the caller holds hdev->lock.
5627 void hci_update_background_scan(struct hci_dev *hdev)
5629 struct hci_request req;
5630 struct hci_conn *conn;
5631 int err;
5633 if (!test_bit(HCI_UP, &hdev->flags) ||
5634 test_bit(HCI_INIT, &hdev->flags) ||
5635 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5636 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5637 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5638 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5639 return;
5641 /* No point in doing scanning if LE support hasn't been enabled */
5642 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5643 return;
5645 /* If discovery is active don't interfere with it */
5646 if (hdev->discovery.state != DISCOVERY_STOPPED)
5647 return;
5649 hci_req_init(&req, hdev);
5651 if (list_empty(&hdev->pend_le_conns) &&
5652 list_empty(&hdev->pend_le_reports)) {
5653 /* If there are no pending LE connections or devices
5654 * to be scanned for, we should stop the background
5655 * scanning.
5658 /* If controller is not scanning we are done. */
5659 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5660 return;
5662 hci_req_add_le_scan_disable(&req);
5664 BT_DBG("%s stopping background scanning", hdev->name);
5665 } else {
5666 /* If there is at least one pending LE connection, we should
5667 * keep the background scan running.
5670 /* If controller is connecting, we should not start scanning
5671 * since some controllers are not able to scan and connect at
5672 * the same time.
5674 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5675 if (conn)
5676 return;
5678 /* If controller is currently scanning, we stop it to ensure we
5679 * don't miss any advertising (due to duplicates filter).
5681 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5682 hci_req_add_le_scan_disable(&req);
5684 hci_req_add_le_passive_scan(&req);
5686 BT_DBG("%s starting background scanning", hdev->name);
5689 err = hci_req_run(&req, update_background_scan_complete);
5690 if (err)
5691 BT_ERR("Failed to run HCI request: err %d", err);