net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
41 /* HCI device list */
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
49 /* HCI ID Numbering */
50 static DEFINE_IDA(hci_index_ida);
52 /* ---- HCI notifications ---- */
54 static void hci_notify(struct hci_dev *hdev, int event)
56 hci_sock_dev_event(hdev, event);
59 /* ---- HCI debugfs entries ---- */
61 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
64 struct hci_dev *hdev = file->private_data;
65 char buf[3];
67 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
68 buf[1] = '\n';
69 buf[2] = '\0';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
73 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
76 struct hci_dev *hdev = file->private_data;
77 struct sk_buff *skb;
78 char buf[32];
79 size_t buf_size = min(count, (sizeof(buf)-1));
80 bool enable;
81 int err;
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
93 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
94 return -EALREADY;
96 hci_req_lock(hdev);
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_unlock(hdev);
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
108 err = -bt_to_errno(skb->data[0]);
109 kfree_skb(skb);
111 if (err < 0)
112 return err;
114 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
116 return count;
119 static const struct file_operations dut_mode_fops = {
120 .open = simple_open,
121 .read = dut_mode_read,
122 .write = dut_mode_write,
123 .llseek = default_llseek,
126 static int features_show(struct seq_file *f, void *ptr)
128 struct hci_dev *hdev = f->private;
129 u8 p;
131 hci_dev_lock(hdev);
132 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
133 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
134 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 hdev->features[p][0], hdev->features[p][1],
136 hdev->features[p][2], hdev->features[p][3],
137 hdev->features[p][4], hdev->features[p][5],
138 hdev->features[p][6], hdev->features[p][7]);
140 if (lmp_le_capable(hdev))
141 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 hdev->le_features[0], hdev->le_features[1],
144 hdev->le_features[2], hdev->le_features[3],
145 hdev->le_features[4], hdev->le_features[5],
146 hdev->le_features[6], hdev->le_features[7]);
147 hci_dev_unlock(hdev);
149 return 0;
152 static int features_open(struct inode *inode, struct file *file)
154 return single_open(file, features_show, inode->i_private);
157 static const struct file_operations features_fops = {
158 .open = features_open,
159 .read = seq_read,
160 .llseek = seq_lseek,
161 .release = single_release,
164 static int blacklist_show(struct seq_file *f, void *p)
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
169 hci_dev_lock(hdev);
170 list_for_each_entry(b, &hdev->blacklist, list)
171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
172 hci_dev_unlock(hdev);
174 return 0;
177 static int blacklist_open(struct inode *inode, struct file *file)
179 return single_open(file, blacklist_show, inode->i_private);
182 static const struct file_operations blacklist_fops = {
183 .open = blacklist_open,
184 .read = seq_read,
185 .llseek = seq_lseek,
186 .release = single_release,
189 static int uuids_show(struct seq_file *f, void *p)
191 struct hci_dev *hdev = f->private;
192 struct bt_uuid *uuid;
194 hci_dev_lock(hdev);
195 list_for_each_entry(uuid, &hdev->uuids, list) {
196 u8 i, val[16];
198 /* The Bluetooth UUID values are stored in big endian,
199 * but with reversed byte order. So convert them into
200 * the right order for the %pUb modifier.
202 for (i = 0; i < 16; i++)
203 val[i] = uuid->uuid[15 - i];
205 seq_printf(f, "%pUb\n", val);
207 hci_dev_unlock(hdev);
209 return 0;
212 static int uuids_open(struct inode *inode, struct file *file)
214 return single_open(file, uuids_show, inode->i_private);
217 static const struct file_operations uuids_fops = {
218 .open = uuids_open,
219 .read = seq_read,
220 .llseek = seq_lseek,
221 .release = single_release,
224 static int inquiry_cache_show(struct seq_file *f, void *p)
226 struct hci_dev *hdev = f->private;
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
230 hci_dev_lock(hdev);
232 list_for_each_entry(e, &cache->all, all) {
233 struct inquiry_data *data = &e->data;
234 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
235 &data->bdaddr,
236 data->pscan_rep_mode, data->pscan_period_mode,
237 data->pscan_mode, data->dev_class[2],
238 data->dev_class[1], data->dev_class[0],
239 __le16_to_cpu(data->clock_offset),
240 data->rssi, data->ssp_mode, e->timestamp);
243 hci_dev_unlock(hdev);
245 return 0;
248 static int inquiry_cache_open(struct inode *inode, struct file *file)
250 return single_open(file, inquiry_cache_show, inode->i_private);
253 static const struct file_operations inquiry_cache_fops = {
254 .open = inquiry_cache_open,
255 .read = seq_read,
256 .llseek = seq_lseek,
257 .release = single_release,
260 static int link_keys_show(struct seq_file *f, void *ptr)
262 struct hci_dev *hdev = f->private;
263 struct list_head *p, *n;
265 hci_dev_lock(hdev);
266 list_for_each_safe(p, n, &hdev->link_keys) {
267 struct link_key *key = list_entry(p, struct link_key, list);
268 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
271 hci_dev_unlock(hdev);
273 return 0;
276 static int link_keys_open(struct inode *inode, struct file *file)
278 return single_open(file, link_keys_show, inode->i_private);
281 static const struct file_operations link_keys_fops = {
282 .open = link_keys_open,
283 .read = seq_read,
284 .llseek = seq_lseek,
285 .release = single_release,
288 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289 size_t count, loff_t *ppos)
291 struct hci_dev *hdev = file->private_data;
292 char buf[3];
294 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
295 buf[1] = '\n';
296 buf[2] = '\0';
297 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
300 static const struct file_operations use_debug_keys_fops = {
301 .open = simple_open,
302 .read = use_debug_keys_read,
303 .llseek = default_llseek,
306 static int dev_class_show(struct seq_file *f, void *ptr)
308 struct hci_dev *hdev = f->private;
310 hci_dev_lock(hdev);
311 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 hdev->dev_class[1], hdev->dev_class[0]);
313 hci_dev_unlock(hdev);
315 return 0;
318 static int dev_class_open(struct inode *inode, struct file *file)
320 return single_open(file, dev_class_show, inode->i_private);
323 static const struct file_operations dev_class_fops = {
324 .open = dev_class_open,
325 .read = seq_read,
326 .llseek = seq_lseek,
327 .release = single_release,
330 static int voice_setting_get(void *data, u64 *val)
332 struct hci_dev *hdev = data;
334 hci_dev_lock(hdev);
335 *val = hdev->voice_setting;
336 hci_dev_unlock(hdev);
338 return 0;
341 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
342 NULL, "0x%4.4llx\n");
344 static int auto_accept_delay_set(void *data, u64 val)
346 struct hci_dev *hdev = data;
348 hci_dev_lock(hdev);
349 hdev->auto_accept_delay = val;
350 hci_dev_unlock(hdev);
352 return 0;
355 static int auto_accept_delay_get(void *data, u64 *val)
357 struct hci_dev *hdev = data;
359 hci_dev_lock(hdev);
360 *val = hdev->auto_accept_delay;
361 hci_dev_unlock(hdev);
363 return 0;
366 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
367 auto_accept_delay_set, "%llu\n");
369 static int ssp_debug_mode_set(void *data, u64 val)
371 struct hci_dev *hdev = data;
372 struct sk_buff *skb;
373 __u8 mode;
374 int err;
376 if (val != 0 && val != 1)
377 return -EINVAL;
379 if (!test_bit(HCI_UP, &hdev->flags))
380 return -ENETDOWN;
382 hci_req_lock(hdev);
383 mode = val;
384 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385 &mode, HCI_CMD_TIMEOUT);
386 hci_req_unlock(hdev);
388 if (IS_ERR(skb))
389 return PTR_ERR(skb);
391 err = -bt_to_errno(skb->data[0]);
392 kfree_skb(skb);
394 if (err < 0)
395 return err;
397 hci_dev_lock(hdev);
398 hdev->ssp_debug_mode = val;
399 hci_dev_unlock(hdev);
401 return 0;
404 static int ssp_debug_mode_get(void *data, u64 *val)
406 struct hci_dev *hdev = data;
408 hci_dev_lock(hdev);
409 *val = hdev->ssp_debug_mode;
410 hci_dev_unlock(hdev);
412 return 0;
415 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416 ssp_debug_mode_set, "%llu\n");
418 static int idle_timeout_set(void *data, u64 val)
420 struct hci_dev *hdev = data;
422 if (val != 0 && (val < 500 || val > 3600000))
423 return -EINVAL;
425 hci_dev_lock(hdev);
426 hdev->idle_timeout = val;
427 hci_dev_unlock(hdev);
429 return 0;
432 static int idle_timeout_get(void *data, u64 *val)
434 struct hci_dev *hdev = data;
436 hci_dev_lock(hdev);
437 *val = hdev->idle_timeout;
438 hci_dev_unlock(hdev);
440 return 0;
443 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
444 idle_timeout_set, "%llu\n");
446 static int sniff_min_interval_set(void *data, u64 val)
448 struct hci_dev *hdev = data;
450 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
451 return -EINVAL;
453 hci_dev_lock(hdev);
454 hdev->sniff_min_interval = val;
455 hci_dev_unlock(hdev);
457 return 0;
460 static int sniff_min_interval_get(void *data, u64 *val)
462 struct hci_dev *hdev = data;
464 hci_dev_lock(hdev);
465 *val = hdev->sniff_min_interval;
466 hci_dev_unlock(hdev);
468 return 0;
471 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
472 sniff_min_interval_set, "%llu\n");
474 static int sniff_max_interval_set(void *data, u64 val)
476 struct hci_dev *hdev = data;
478 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
479 return -EINVAL;
481 hci_dev_lock(hdev);
482 hdev->sniff_max_interval = val;
483 hci_dev_unlock(hdev);
485 return 0;
488 static int sniff_max_interval_get(void *data, u64 *val)
490 struct hci_dev *hdev = data;
492 hci_dev_lock(hdev);
493 *val = hdev->sniff_max_interval;
494 hci_dev_unlock(hdev);
496 return 0;
499 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
500 sniff_max_interval_set, "%llu\n");
502 static int static_address_show(struct seq_file *f, void *p)
504 struct hci_dev *hdev = f->private;
506 hci_dev_lock(hdev);
507 seq_printf(f, "%pMR\n", &hdev->static_addr);
508 hci_dev_unlock(hdev);
510 return 0;
513 static int static_address_open(struct inode *inode, struct file *file)
515 return single_open(file, static_address_show, inode->i_private);
518 static const struct file_operations static_address_fops = {
519 .open = static_address_open,
520 .read = seq_read,
521 .llseek = seq_lseek,
522 .release = single_release,
525 static int own_address_type_set(void *data, u64 val)
527 struct hci_dev *hdev = data;
529 if (val != 0 && val != 1)
530 return -EINVAL;
532 hci_dev_lock(hdev);
533 hdev->own_addr_type = val;
534 hci_dev_unlock(hdev);
536 return 0;
539 static int own_address_type_get(void *data, u64 *val)
541 struct hci_dev *hdev = data;
543 hci_dev_lock(hdev);
544 *val = hdev->own_addr_type;
545 hci_dev_unlock(hdev);
547 return 0;
550 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
551 own_address_type_set, "%llu\n");
553 static int long_term_keys_show(struct seq_file *f, void *ptr)
555 struct hci_dev *hdev = f->private;
556 struct list_head *p, *n;
558 hci_dev_lock(hdev);
559 list_for_each_safe(p, n, &hdev->long_term_keys) {
560 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
562 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 8, ltk->rand, 16, ltk->val);
566 hci_dev_unlock(hdev);
568 return 0;
571 static int long_term_keys_open(struct inode *inode, struct file *file)
573 return single_open(file, long_term_keys_show, inode->i_private);
576 static const struct file_operations long_term_keys_fops = {
577 .open = long_term_keys_open,
578 .read = seq_read,
579 .llseek = seq_lseek,
580 .release = single_release,
583 static int conn_min_interval_set(void *data, u64 val)
585 struct hci_dev *hdev = data;
587 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
588 return -EINVAL;
590 hci_dev_lock(hdev);
591 hdev->le_conn_min_interval = val;
592 hci_dev_unlock(hdev);
594 return 0;
597 static int conn_min_interval_get(void *data, u64 *val)
599 struct hci_dev *hdev = data;
601 hci_dev_lock(hdev);
602 *val = hdev->le_conn_min_interval;
603 hci_dev_unlock(hdev);
605 return 0;
608 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609 conn_min_interval_set, "%llu\n");
611 static int conn_max_interval_set(void *data, u64 val)
613 struct hci_dev *hdev = data;
615 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
616 return -EINVAL;
618 hci_dev_lock(hdev);
619 hdev->le_conn_max_interval = val;
620 hci_dev_unlock(hdev);
622 return 0;
625 static int conn_max_interval_get(void *data, u64 *val)
627 struct hci_dev *hdev = data;
629 hci_dev_lock(hdev);
630 *val = hdev->le_conn_max_interval;
631 hci_dev_unlock(hdev);
633 return 0;
636 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 conn_max_interval_set, "%llu\n");
639 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
640 size_t count, loff_t *ppos)
642 struct hci_dev *hdev = file->private_data;
643 char buf[3];
645 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
646 buf[1] = '\n';
647 buf[2] = '\0';
648 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
651 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
652 size_t count, loff_t *position)
654 struct hci_dev *hdev = fp->private_data;
655 bool enable;
656 char buf[32];
657 size_t buf_size = min(count, (sizeof(buf)-1));
659 if (copy_from_user(buf, user_buffer, buf_size))
660 return -EFAULT;
662 buf[buf_size] = '\0';
664 if (strtobool(buf, &enable) < 0)
665 return -EINVAL;
667 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
668 return -EALREADY;
670 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
672 return count;
675 static const struct file_operations lowpan_debugfs_fops = {
676 .open = simple_open,
677 .read = lowpan_read,
678 .write = lowpan_write,
679 .llseek = default_llseek,
682 /* ---- HCI requests ---- */
684 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
686 BT_DBG("%s result 0x%2.2x", hdev->name, result);
688 if (hdev->req_status == HCI_REQ_PEND) {
689 hdev->req_result = result;
690 hdev->req_status = HCI_REQ_DONE;
691 wake_up_interruptible(&hdev->req_wait_q);
695 static void hci_req_cancel(struct hci_dev *hdev, int err)
697 BT_DBG("%s err 0x%2.2x", hdev->name, err);
699 if (hdev->req_status == HCI_REQ_PEND) {
700 hdev->req_result = err;
701 hdev->req_status = HCI_REQ_CANCELED;
702 wake_up_interruptible(&hdev->req_wait_q);
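/* Pull the last received event skb out of hdev->recv_evt and return it if it
 * matches the expected event (or the Command Complete event for the given
 * opcode). Returns ERR_PTR(-ENODATA) if no suitable event is pending.
 */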
706 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
707 u8 event)
709 struct hci_ev_cmd_complete *ev;
710 struct hci_event_hdr *hdr;
711 struct sk_buff *skb;
713 hci_dev_lock(hdev);
715 skb = hdev->recv_evt;
716 hdev->recv_evt = NULL;
718 hci_dev_unlock(hdev);
720 if (!skb)
721 return ERR_PTR(-ENODATA);
723 if (skb->len < sizeof(*hdr)) {
724 BT_ERR("Too short HCI event");
725 goto failed;
728 hdr = (void *) skb->data;
729 skb_pull(skb, HCI_EVENT_HDR_SIZE);
731 if (event) {
732 if (hdr->evt != event)
733 goto failed;
734 return skb;
737 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
738 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
739 goto failed;
742 if (skb->len < sizeof(*ev)) {
743 BT_ERR("Too short cmd_complete event");
744 goto failed;
747 ev = (void *) skb->data;
748 skb_pull(skb, sizeof(*ev));
750 if (opcode == __le16_to_cpu(ev->opcode))
751 return skb;
753 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
754 __le16_to_cpu(ev->opcode));
756 failed:
757 kfree_skb(skb);
758 return ERR_PTR(-ENODATA);
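/* Send a single HCI command and wait up to 'timeout' for its completion,
 * returning the resulting event skb on success or an ERR_PTR on error,
 * signal or timeout.
 */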
761 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
762 const void *param, u8 event, u32 timeout)
764 DECLARE_WAITQUEUE(wait, current);
765 struct hci_request req;
766 int err = 0;
768 BT_DBG("%s", hdev->name);
770 hci_req_init(&req, hdev);
772 hci_req_add_ev(&req, opcode, plen, param, event);
774 hdev->req_status = HCI_REQ_PEND;
776 err = hci_req_run(&req, hci_req_sync_complete);
777 if (err < 0)
778 return ERR_PTR(err);
780 add_wait_queue(&hdev->req_wait_q, &wait);
781 set_current_state(TASK_INTERRUPTIBLE);
783 schedule_timeout(timeout);
785 remove_wait_queue(&hdev->req_wait_q, &wait);
787 if (signal_pending(current))
788 return ERR_PTR(-EINTR);
790 switch (hdev->req_status) {
791 case HCI_REQ_DONE:
792 err = -bt_to_errno(hdev->req_result);
793 break;
795 case HCI_REQ_CANCELED:
796 err = -hdev->req_result;
797 break;
799 default:
800 err = -ETIMEDOUT;
801 break;
804 hdev->req_status = hdev->req_result = 0;
806 BT_DBG("%s end: err %d", hdev->name, err);
808 if (err < 0)
809 return ERR_PTR(err);
811 return hci_get_cmd_complete(hdev, opcode, event);
813 EXPORT_SYMBOL(__hci_cmd_sync_ev);
815 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
816 const void *param, u32 timeout)
818 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
820 EXPORT_SYMBOL(__hci_cmd_sync);
822 /* Execute request and wait for completion. */
823 static int __hci_req_sync(struct hci_dev *hdev,
824 void (*func)(struct hci_request *req,
825 unsigned long opt),
826 unsigned long opt, __u32 timeout)
828 struct hci_request req;
829 DECLARE_WAITQUEUE(wait, current);
830 int err = 0;
832 BT_DBG("%s start", hdev->name);
834 hci_req_init(&req, hdev);
836 hdev->req_status = HCI_REQ_PEND;
838 func(&req, opt);
840 err = hci_req_run(&req, hci_req_sync_complete);
841 if (err < 0) {
842 hdev->req_status = 0;
844 /* ENODATA means the HCI request command queue is empty.
845 * This can happen when a request with conditionals doesn't
846 * trigger any commands to be sent. This is normal behavior
847 * and should not trigger an error return.
849 if (err == -ENODATA)
850 return 0;
852 return err;
855 add_wait_queue(&hdev->req_wait_q, &wait);
856 set_current_state(TASK_INTERRUPTIBLE);
858 schedule_timeout(timeout);
860 remove_wait_queue(&hdev->req_wait_q, &wait);
862 if (signal_pending(current))
863 return -EINTR;
865 switch (hdev->req_status) {
866 case HCI_REQ_DONE:
867 err = -bt_to_errno(hdev->req_result);
868 break;
870 case HCI_REQ_CANCELED:
871 err = -hdev->req_result;
872 break;
874 default:
875 err = -ETIMEDOUT;
876 break;
879 hdev->req_status = hdev->req_result = 0;
881 BT_DBG("%s end: err %d", hdev->name, err);
883 return err;
886 static int hci_req_sync(struct hci_dev *hdev,
887 void (*req)(struct hci_request *req,
888 unsigned long opt),
889 unsigned long opt, __u32 timeout)
891 int ret;
893 if (!test_bit(HCI_UP, &hdev->flags))
894 return -ENETDOWN;
896 /* Serialize all requests */
897 hci_req_lock(hdev);
898 ret = __hci_req_sync(hdev, req, opt, timeout);
899 hci_req_unlock(hdev);
901 return ret;
904 static void hci_reset_req(struct hci_request *req, unsigned long opt)
906 BT_DBG("%s %ld", req->hdev->name, opt);
908 /* Reset device */
909 set_bit(HCI_RESET, &req->hdev->flags);
910 hci_req_add(req, HCI_OP_RESET, 0, NULL);
913 static void bredr_init(struct hci_request *req)
915 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
917 /* Read Local Supported Features */
918 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
920 /* Read Local Version */
921 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
923 /* Read BD Address */
924 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
927 static void amp_init(struct hci_request *req)
929 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
931 /* Read Local Version */
932 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
934 /* Read Local Supported Commands */
935 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
937 /* Read Local Supported Features */
938 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
940 /* Read Local AMP Info */
941 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
943 /* Read Data Blk size */
944 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
946 /* Read Flow Control Mode */
947 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
949 /* Read Location Data */
950 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
953 static void hci_init1_req(struct hci_request *req, unsigned long opt)
955 struct hci_dev *hdev = req->hdev;
957 BT_DBG("%s %ld", hdev->name, opt);
959 /* Reset */
960 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
961 hci_reset_req(req, 0);
963 switch (hdev->dev_type) {
964 case HCI_BREDR:
965 bredr_init(req);
966 break;
968 case HCI_AMP:
969 amp_init(req);
970 break;
972 default:
973 BT_ERR("Unknown device type %d", hdev->dev_type);
974 break;
978 static void bredr_setup(struct hci_request *req)
980 struct hci_dev *hdev = req->hdev;
982 __le16 param;
983 __u8 flt_type;
985 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
986 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
988 /* Read Class of Device */
989 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
991 /* Read Local Name */
992 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
994 /* Read Voice Setting */
995 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
997 /* Read Number of Supported IAC */
998 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1000 /* Read Current IAC LAP */
1001 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1003 /* Clear Event Filters */
1004 flt_type = HCI_FLT_CLEAR_ALL;
1005 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1007 /* Connection accept timeout ~20 secs */
1008 param = __constant_cpu_to_le16(0x7d00);
1009 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1011 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1012 * but it does not support page scan related HCI commands.
1014 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1015 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1016 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1020 static void le_setup(struct hci_request *req)
1022 struct hci_dev *hdev = req->hdev;
1024 /* Read LE Buffer Size */
1025 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1027 /* Read LE Local Supported Features */
1028 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1030 /* Read LE Advertising Channel TX Power */
1031 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1033 /* Read LE White List Size */
1034 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1036 /* Read LE Supported States */
1037 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1039 /* LE-only controllers have LE implicitly enabled */
1040 if (!lmp_bredr_capable(hdev))
1041 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1044 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1046 if (lmp_ext_inq_capable(hdev))
1047 return 0x02;
1049 if (lmp_inq_rssi_capable(hdev))
1050 return 0x01;
1052 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1053 hdev->lmp_subver == 0x0757)
1054 return 0x01;
1056 if (hdev->manufacturer == 15) {
1057 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1058 return 0x01;
1059 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1060 return 0x01;
1061 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1062 return 0x01;
1065 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1066 hdev->lmp_subver == 0x1805)
1067 return 0x01;
1069 return 0x00;
1072 static void hci_setup_inquiry_mode(struct hci_request *req)
1074 u8 mode;
1076 mode = hci_get_inquiry_mode(req->hdev);
1078 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
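/* Build the HCI event mask from the controller's LMP features: start from a
 * BR/EDR or LE-only baseline, enable the optional events each supported
 * feature needs, and set the LE event mask if LE is supported.
 */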
1081 static void hci_setup_event_mask(struct hci_request *req)
1083 struct hci_dev *hdev = req->hdev;
1085 /* The second byte is 0xff instead of 0x9f (two reserved bits
1086 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1087 * command otherwise.
1089 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1091 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1092 * any event mask for pre-1.2 devices.
1094 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1095 return;
1097 if (lmp_bredr_capable(hdev)) {
1098 events[4] |= 0x01; /* Flow Specification Complete */
1099 events[4] |= 0x02; /* Inquiry Result with RSSI */
1100 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1101 events[5] |= 0x08; /* Synchronous Connection Complete */
1102 events[5] |= 0x10; /* Synchronous Connection Changed */
1103 } else {
1104 /* Use a different default for LE-only devices */
1105 memset(events, 0, sizeof(events));
1106 events[0] |= 0x10; /* Disconnection Complete */
1107 events[0] |= 0x80; /* Encryption Change */
1108 events[1] |= 0x08; /* Read Remote Version Information Complete */
1109 events[1] |= 0x20; /* Command Complete */
1110 events[1] |= 0x40; /* Command Status */
1111 events[1] |= 0x80; /* Hardware Error */
1112 events[2] |= 0x04; /* Number of Completed Packets */
1113 events[3] |= 0x02; /* Data Buffer Overflow */
1114 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1117 if (lmp_inq_rssi_capable(hdev))
1118 events[4] |= 0x02; /* Inquiry Result with RSSI */
1120 if (lmp_sniffsubr_capable(hdev))
1121 events[5] |= 0x20; /* Sniff Subrating */
1123 if (lmp_pause_enc_capable(hdev))
1124 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1126 if (lmp_ext_inq_capable(hdev))
1127 events[5] |= 0x40; /* Extended Inquiry Result */
1129 if (lmp_no_flush_capable(hdev))
1130 events[7] |= 0x01; /* Enhanced Flush Complete */
1132 if (lmp_lsto_capable(hdev))
1133 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1135 if (lmp_ssp_capable(hdev)) {
1136 events[6] |= 0x01; /* IO Capability Request */
1137 events[6] |= 0x02; /* IO Capability Response */
1138 events[6] |= 0x04; /* User Confirmation Request */
1139 events[6] |= 0x08; /* User Passkey Request */
1140 events[6] |= 0x10; /* Remote OOB Data Request */
1141 events[6] |= 0x20; /* Simple Pairing Complete */
1142 events[7] |= 0x04; /* User Passkey Notification */
1143 events[7] |= 0x08; /* Keypress Notification */
1144 events[7] |= 0x10; /* Remote Host Supported
1145 * Features Notification
1149 if (lmp_le_capable(hdev))
1150 events[7] |= 0x20; /* LE Meta-Event */
1152 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1154 if (lmp_le_capable(hdev)) {
1155 memset(events, 0, sizeof(events));
1156 events[0] = 0x1f;
1157 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1158 sizeof(events), events);
1162 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1164 struct hci_dev *hdev = req->hdev;
1166 if (lmp_bredr_capable(hdev))
1167 bredr_setup(req);
1168 else
1169 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1171 if (lmp_le_capable(hdev))
1172 le_setup(req);
1174 hci_setup_event_mask(req);
1176 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1177 * local supported commands HCI command.
1179 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1180 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1182 if (lmp_ssp_capable(hdev)) {
1183 /* When SSP is available, the host features page
1184 * should also be available. However some
1185 * controllers list the max_page as 0 as long as SSP
1186 * has not been enabled. To achieve proper debugging
1187 * output, force the minimum max_page to 1 at least.
1189 hdev->max_page = 0x01;
1191 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1192 u8 mode = 0x01;
1193 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1194 sizeof(mode), &mode);
1195 } else {
1196 struct hci_cp_write_eir cp;
1198 memset(hdev->eir, 0, sizeof(hdev->eir));
1199 memset(&cp, 0, sizeof(cp));
1201 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1205 if (lmp_inq_rssi_capable(hdev))
1206 hci_setup_inquiry_mode(req);
1208 if (lmp_inq_tx_pwr_capable(hdev))
1209 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1211 if (lmp_ext_feat_capable(hdev)) {
1212 struct hci_cp_read_local_ext_features cp;
1214 cp.page = 0x01;
1215 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1216 sizeof(cp), &cp);
1219 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1220 u8 enable = 1;
1221 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1222 &enable);
1226 static void hci_setup_link_policy(struct hci_request *req)
1228 struct hci_dev *hdev = req->hdev;
1229 struct hci_cp_write_def_link_policy cp;
1230 u16 link_policy = 0;
1232 if (lmp_rswitch_capable(hdev))
1233 link_policy |= HCI_LP_RSWITCH;
1234 if (lmp_hold_capable(hdev))
1235 link_policy |= HCI_LP_HOLD;
1236 if (lmp_sniff_capable(hdev))
1237 link_policy |= HCI_LP_SNIFF;
1238 if (lmp_park_capable(hdev))
1239 link_policy |= HCI_LP_PARK;
1241 cp.policy = cpu_to_le16(link_policy);
1242 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1245 static void hci_set_le_support(struct hci_request *req)
1247 struct hci_dev *hdev = req->hdev;
1248 struct hci_cp_write_le_host_supported cp;
1250 /* LE-only devices do not support explicit enablement */
1251 if (!lmp_bredr_capable(hdev))
1252 return;
1254 memset(&cp, 0, sizeof(cp));
1256 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1257 cp.le = 0x01;
1258 cp.simul = lmp_le_br_capable(hdev);
1261 if (cp.le != lmp_host_le_capable(hdev))
1262 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1263 &cp);
1266 static void hci_set_event_mask_page_2(struct hci_request *req)
1268 struct hci_dev *hdev = req->hdev;
1269 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1271 /* If Connectionless Slave Broadcast master role is supported,
1272 * enable all necessary events for it.
1274 if (lmp_csb_master_capable(hdev)) {
1275 events[1] |= 0x40; /* Triggered Clock Capture */
1276 events[1] |= 0x80; /* Synchronization Train Complete */
1277 events[2] |= 0x10; /* Slave Page Response Timeout */
1278 events[2] |= 0x20; /* CSB Channel Map Change */
1281 /* If Connectionless Slave Broadcast slave role is supported,
1282 * enable all necessary events for it.
1284 if (lmp_csb_slave_capable(hdev)) {
1285 events[2] |= 0x01; /* Synchronization Train Received */
1286 events[2] |= 0x02; /* CSB Receive */
1287 events[2] |= 0x04; /* CSB Timeout */
1288 events[2] |= 0x08; /* Truncated Page Complete */
1291 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
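/* Third init stage: delete stored link keys when the command is supported,
 * set the default link policy, configure LE support and the own address
 * type, and read the remaining extended feature pages.
 */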
1294 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1296 struct hci_dev *hdev = req->hdev;
1297 u8 p;
1299 /* Some Broadcom based Bluetooth controllers do not support the
1300 * Delete Stored Link Key command. They are clearly indicating its
1301 * absence in the bit mask of supported commands.
1303 * Check the supported commands and send it only if the command is
1304 * marked as supported. If not supported, assume that the controller
1305 * does not have actual support for stored link keys, which makes this
1306 * command redundant anyway.
1308 * Some controllers indicate that they support handling deleting
1309 * stored link keys, but they don't. The quirk lets a driver
1310 * just disable this command.
1312 if (hdev->commands[6] & 0x80 &&
1313 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1314 struct hci_cp_delete_stored_link_key cp;
1316 bacpy(&cp.bdaddr, BDADDR_ANY);
1317 cp.delete_all = 0x01;
1318 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1319 sizeof(cp), &cp);
1322 if (hdev->commands[5] & 0x10)
1323 hci_setup_link_policy(req);
1325 if (lmp_le_capable(hdev)) {
1326 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1327 /* If the controller has a public BD_ADDR, then
1328 * by default use that one. If this is a LE only
1329 * controller without a public address, default
1330 * to the random address.
1332 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1333 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1334 else
1335 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1338 hci_set_le_support(req);
1341 /* Read features beyond page 1 if available */
1342 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1343 struct hci_cp_read_local_ext_features cp;
1345 cp.page = p;
1346 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1347 sizeof(cp), &cp);
1351 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1353 struct hci_dev *hdev = req->hdev;
1355 /* Set event mask page 2 if the HCI command for it is supported */
1356 if (hdev->commands[22] & 0x04)
1357 hci_set_event_mask_page_2(req);
1359 /* Check for Synchronization Train support */
1360 if (lmp_sync_train_capable(hdev))
1361 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
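/* Run the staged controller initialization (hci_init1_req through
 * hci_init4_req). AMP controllers only run the first stage; most debugfs
 * entries are created only during the initial HCI_SETUP phase.
 */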
1364 static int __hci_init(struct hci_dev *hdev)
1366 int err;
1368 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1369 if (err < 0)
1370 return err;
1372 /* The Device Under Test (DUT) mode is special and available for
1373 * all controller types. So just create it early on.
1375 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1376 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1377 &dut_mode_fops);
1380 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1381 * BR/EDR/LE type controllers. AMP controllers only need the
1382 * first stage init.
1384 if (hdev->dev_type != HCI_BREDR)
1385 return 0;
1387 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1388 if (err < 0)
1389 return err;
1391 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1392 if (err < 0)
1393 return err;
1395 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1396 if (err < 0)
1397 return err;
1399 /* Only create debugfs entries during the initial setup
1400 * phase and not every time the controller gets powered on.
1402 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1403 return 0;
1405 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1406 &features_fops);
1407 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1408 &hdev->manufacturer);
1409 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1410 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1411 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1412 &blacklist_fops);
1413 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1415 if (lmp_bredr_capable(hdev)) {
1416 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1417 hdev, &inquiry_cache_fops);
1418 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1419 hdev, &link_keys_fops);
1420 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1421 hdev, &use_debug_keys_fops);
1422 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1423 hdev, &dev_class_fops);
1424 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1425 hdev, &voice_setting_fops);
1428 if (lmp_ssp_capable(hdev)) {
1429 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1430 hdev, &auto_accept_delay_fops);
1431 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1432 hdev, &ssp_debug_mode_fops);
1435 if (lmp_sniff_capable(hdev)) {
1436 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1437 hdev, &idle_timeout_fops);
1438 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1439 hdev, &sniff_min_interval_fops);
1440 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1441 hdev, &sniff_max_interval_fops);
1444 if (lmp_le_capable(hdev)) {
1445 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1446 &hdev->le_white_list_size);
1447 debugfs_create_file("static_address", 0444, hdev->debugfs,
1448 hdev, &static_address_fops);
1449 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1450 hdev, &own_address_type_fops);
1451 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1452 hdev, &long_term_keys_fops);
1453 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1454 hdev, &conn_min_interval_fops);
1455 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1456 hdev, &conn_max_interval_fops);
1457 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1458 &lowpan_debugfs_fops);
1461 return 0;
1464 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1466 __u8 scan = opt;
1468 BT_DBG("%s %x", req->hdev->name, scan);
1470 /* Inquiry and Page scans */
1471 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1474 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1476 __u8 auth = opt;
1478 BT_DBG("%s %x", req->hdev->name, auth);
1480 /* Authentication */
1481 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1484 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1486 __u8 encrypt = opt;
1488 BT_DBG("%s %x", req->hdev->name, encrypt);
1490 /* Encryption */
1491 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1494 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1496 __le16 policy = cpu_to_le16(opt);
1498 BT_DBG("%s %x", req->hdev->name, policy);
1500 /* Default link policy */
1501 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1504 /* Get HCI device by index.
1505 * Device is held on return. */
1506 struct hci_dev *hci_dev_get(int index)
1508 struct hci_dev *hdev = NULL, *d;
1510 BT_DBG("%d", index);
1512 if (index < 0)
1513 return NULL;
1515 read_lock(&hci_dev_list_lock);
1516 list_for_each_entry(d, &hci_dev_list, list) {
1517 if (d->id == index) {
1518 hdev = hci_dev_hold(d);
1519 break;
1522 read_unlock(&hci_dev_list_lock);
1523 return hdev;
1526 /* ---- Inquiry support ---- */
1528 bool hci_discovery_active(struct hci_dev *hdev)
1530 struct discovery_state *discov = &hdev->discovery;
1532 switch (discov->state) {
1533 case DISCOVERY_FINDING:
1534 case DISCOVERY_RESOLVING:
1535 return true;
1537 default:
1538 return false;
1542 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1544 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1546 if (hdev->discovery.state == state)
1547 return;
1549 switch (state) {
1550 case DISCOVERY_STOPPED:
1551 if (hdev->discovery.state != DISCOVERY_STARTING)
1552 mgmt_discovering(hdev, 0);
1553 break;
1554 case DISCOVERY_STARTING:
1555 break;
1556 case DISCOVERY_FINDING:
1557 mgmt_discovering(hdev, 1);
1558 break;
1559 case DISCOVERY_RESOLVING:
1560 break;
1561 case DISCOVERY_STOPPING:
1562 break;
1565 hdev->discovery.state = state;
1568 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1570 struct discovery_state *cache = &hdev->discovery;
1571 struct inquiry_entry *p, *n;
1573 list_for_each_entry_safe(p, n, &cache->all, all) {
1574 list_del(&p->all);
1575 kfree(p);
1578 INIT_LIST_HEAD(&cache->unknown);
1579 INIT_LIST_HEAD(&cache->resolve);
1582 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1583 bdaddr_t *bdaddr)
1585 struct discovery_state *cache = &hdev->discovery;
1586 struct inquiry_entry *e;
1588 BT_DBG("cache %p, %pMR", cache, bdaddr);
1590 list_for_each_entry(e, &cache->all, all) {
1591 if (!bacmp(&e->data.bdaddr, bdaddr))
1592 return e;
1595 return NULL;
1598 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1599 bdaddr_t *bdaddr)
1601 struct discovery_state *cache = &hdev->discovery;
1602 struct inquiry_entry *e;
1604 BT_DBG("cache %p, %pMR", cache, bdaddr);
1606 list_for_each_entry(e, &cache->unknown, list) {
1607 if (!bacmp(&e->data.bdaddr, bdaddr))
1608 return e;
1611 return NULL;
1614 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1615 bdaddr_t *bdaddr,
1616 int state)
1618 struct discovery_state *cache = &hdev->discovery;
1619 struct inquiry_entry *e;
1621 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1623 list_for_each_entry(e, &cache->resolve, list) {
1624 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1625 return e;
1626 if (!bacmp(&e->data.bdaddr, bdaddr))
1627 return e;
1630 return NULL;
1633 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1634 struct inquiry_entry *ie)
1636 struct discovery_state *cache = &hdev->discovery;
1637 struct list_head *pos = &cache->resolve;
1638 struct inquiry_entry *p;
1640 list_del(&ie->list);
1642 list_for_each_entry(p, &cache->resolve, list) {
1643 if (p->name_state != NAME_PENDING &&
1644 abs(p->data.rssi) >= abs(ie->data.rssi))
1645 break;
1646 pos = &p->list;
1649 list_add(&ie->list, pos);
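/* Add a new inquiry cache entry or refresh an existing one for the reported
 * device. Returns false if the device's name still needs to be resolved (or
 * the entry could not be allocated), true otherwise.
 */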
1652 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1653 bool name_known, bool *ssp)
1655 struct discovery_state *cache = &hdev->discovery;
1656 struct inquiry_entry *ie;
1658 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1660 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1662 if (ssp)
1663 *ssp = data->ssp_mode;
1665 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1666 if (ie) {
1667 if (ie->data.ssp_mode && ssp)
1668 *ssp = true;
1670 if (ie->name_state == NAME_NEEDED &&
1671 data->rssi != ie->data.rssi) {
1672 ie->data.rssi = data->rssi;
1673 hci_inquiry_cache_update_resolve(hdev, ie);
1676 goto update;
1679 /* Entry not in the cache. Add new one. */
1680 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1681 if (!ie)
1682 return false;
1684 list_add(&ie->all, &cache->all);
1686 if (name_known) {
1687 ie->name_state = NAME_KNOWN;
1688 } else {
1689 ie->name_state = NAME_NOT_KNOWN;
1690 list_add(&ie->list, &cache->unknown);
1693 update:
1694 if (name_known && ie->name_state != NAME_KNOWN &&
1695 ie->name_state != NAME_PENDING) {
1696 ie->name_state = NAME_KNOWN;
1697 list_del(&ie->list);
1700 memcpy(&ie->data, data, sizeof(*data));
1701 ie->timestamp = jiffies;
1702 cache->timestamp = jiffies;
1704 if (ie->name_state == NAME_NOT_KNOWN)
1705 return false;
1707 return true;
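/* Copy up to 'num' entries from the inquiry cache into 'buf' as struct
 * inquiry_info records and return the number of entries copied.
 */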
1710 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1712 struct discovery_state *cache = &hdev->discovery;
1713 struct inquiry_info *info = (struct inquiry_info *) buf;
1714 struct inquiry_entry *e;
1715 int copied = 0;
1717 list_for_each_entry(e, &cache->all, all) {
1718 struct inquiry_data *data = &e->data;
1720 if (copied >= num)
1721 break;
1723 bacpy(&info->bdaddr, &data->bdaddr);
1724 info->pscan_rep_mode = data->pscan_rep_mode;
1725 info->pscan_period_mode = data->pscan_period_mode;
1726 info->pscan_mode = data->pscan_mode;
1727 memcpy(info->dev_class, data->dev_class, 3);
1728 info->clock_offset = data->clock_offset;
1730 info++;
1731 copied++;
1734 BT_DBG("cache %p, copied %d", cache, copied);
1735 return copied;
1738 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1740 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1741 struct hci_dev *hdev = req->hdev;
1742 struct hci_cp_inquiry cp;
1744 BT_DBG("%s", hdev->name);
1746 if (test_bit(HCI_INQUIRY, &hdev->flags))
1747 return;
1749 /* Start Inquiry */
1750 memcpy(&cp.lap, &ir->lap, 3);
1751 cp.length = ir->length;
1752 cp.num_rsp = ir->num_rsp;
1753 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1756 static int wait_inquiry(void *word)
1758 schedule();
1759 return signal_pending(current);
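/* Handle a user-space inquiry request: start a new inquiry if the cache is
 * stale, empty or a flush was requested, wait for it to finish, then copy
 * the cached results back to user space.
 */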
1762 int hci_inquiry(void __user *arg)
1764 __u8 __user *ptr = arg;
1765 struct hci_inquiry_req ir;
1766 struct hci_dev *hdev;
1767 int err = 0, do_inquiry = 0, max_rsp;
1768 long timeo;
1769 __u8 *buf;
1771 if (copy_from_user(&ir, ptr, sizeof(ir)))
1772 return -EFAULT;
1774 hdev = hci_dev_get(ir.dev_id);
1775 if (!hdev)
1776 return -ENODEV;
1778 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1779 err = -EBUSY;
1780 goto done;
1783 if (hdev->dev_type != HCI_BREDR) {
1784 err = -EOPNOTSUPP;
1785 goto done;
1788 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1789 err = -EOPNOTSUPP;
1790 goto done;
1793 hci_dev_lock(hdev);
1794 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1795 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1796 hci_inquiry_cache_flush(hdev);
1797 do_inquiry = 1;
1799 hci_dev_unlock(hdev);
1801 timeo = ir.length * msecs_to_jiffies(2000);
1803 if (do_inquiry) {
1804 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1805 timeo);
1806 if (err < 0)
1807 goto done;
1809 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1810 * cleared). If it is interrupted by a signal, return -EINTR.
1812 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1813 TASK_INTERRUPTIBLE))
1814 return -EINTR;
1817 /* For an unlimited number of responses we will use a buffer with
1818 * 255 entries
1820 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1822 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
1823 * copy it to user space.
1825 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1826 if (!buf) {
1827 err = -ENOMEM;
1828 goto done;
1831 hci_dev_lock(hdev);
1832 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1833 hci_dev_unlock(hdev);
1835 BT_DBG("num_rsp %d", ir.num_rsp);
1837 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1838 ptr += sizeof(ir);
1839 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1840 ir.num_rsp))
1841 err = -EFAULT;
1842 } else
1843 err = -EFAULT;
1845 kfree(buf);
1847 done:
1848 hci_dev_put(hdev);
1849 return err;
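/* Core power-on path: open the driver, run the optional setup callback
 * during HCI_SETUP, and perform the HCI init sequence unless the device is
 * marked raw or bound to a user channel. Takes the request lock itself.
 */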
1852 static int hci_dev_do_open(struct hci_dev *hdev)
1854 int ret = 0;
1856 BT_DBG("%s %p", hdev->name, hdev);
1858 hci_req_lock(hdev);
1860 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1861 ret = -ENODEV;
1862 goto done;
1865 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1866 /* Check for rfkill but allow the HCI setup stage to
1867 * proceed (which in itself doesn't cause any RF activity).
1869 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1870 ret = -ERFKILL;
1871 goto done;
1874 /* Check for valid public address or a configured static
1875 * random address, but let the HCI setup proceed to
1876 * be able to determine if there is a public address
1877 * or not.
1879 * This check is only valid for BR/EDR controllers
1880 * since AMP controllers do not have an address.
1882 if (hdev->dev_type == HCI_BREDR &&
1883 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1884 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1885 ret = -EADDRNOTAVAIL;
1886 goto done;
1890 if (test_bit(HCI_UP, &hdev->flags)) {
1891 ret = -EALREADY;
1892 goto done;
1895 if (hdev->open(hdev)) {
1896 ret = -EIO;
1897 goto done;
1900 atomic_set(&hdev->cmd_cnt, 1);
1901 set_bit(HCI_INIT, &hdev->flags);
1903 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1904 ret = hdev->setup(hdev);
1906 if (!ret) {
1907 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908 set_bit(HCI_RAW, &hdev->flags);
1910 if (!test_bit(HCI_RAW, &hdev->flags) &&
1911 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1912 ret = __hci_init(hdev);
1915 clear_bit(HCI_INIT, &hdev->flags);
1917 if (!ret) {
1918 hci_dev_hold(hdev);
1919 set_bit(HCI_UP, &hdev->flags);
1920 hci_notify(hdev, HCI_DEV_UP);
1921 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1922 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1923 hdev->dev_type == HCI_BREDR) {
1924 hci_dev_lock(hdev);
1925 mgmt_powered(hdev, 1);
1926 hci_dev_unlock(hdev);
1928 } else {
1929 /* Init failed, cleanup */
1930 flush_work(&hdev->tx_work);
1931 flush_work(&hdev->cmd_work);
1932 flush_work(&hdev->rx_work);
1934 skb_queue_purge(&hdev->cmd_q);
1935 skb_queue_purge(&hdev->rx_q);
1937 if (hdev->flush)
1938 hdev->flush(hdev);
1940 if (hdev->sent_cmd) {
1941 kfree_skb(hdev->sent_cmd);
1942 hdev->sent_cmd = NULL;
1945 hdev->close(hdev);
1946 hdev->flags = 0;
1949 done:
1950 hci_req_unlock(hdev);
1951 return ret;
1954 /* ---- HCI ioctl helpers ---- */
1956 int hci_dev_open(__u16 dev)
1958 struct hci_dev *hdev;
1959 int err;
1961 hdev = hci_dev_get(dev);
1962 if (!hdev)
1963 return -ENODEV;
1965 /* We need to ensure that no other power on/off work is pending
1966 * before proceeding to call hci_dev_do_open. This is
1967 * particularly important if the setup procedure has not yet
1968 * completed.
1970 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1971 cancel_delayed_work(&hdev->power_off);
1973 /* After this call it is guaranteed that the setup procedure
1974 * has finished. This means that error conditions like RFKILL
1975 * or no valid public or static random address apply.
1977 flush_workqueue(hdev->req_workqueue);
1979 err = hci_dev_do_open(hdev);
1981 hci_dev_put(hdev);
1983 return err;
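/* Core power-off path: flush pending work, clear discovery and connection
 * state, optionally send HCI_Reset (HCI_QUIRK_RESET_ON_CLOSE), drop all
 * queues and close the driver.
 */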
1986 static int hci_dev_do_close(struct hci_dev *hdev)
1988 BT_DBG("%s %p", hdev->name, hdev);
1990 cancel_delayed_work(&hdev->power_off);
1992 hci_req_cancel(hdev, ENODEV);
1993 hci_req_lock(hdev);
1995 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1996 del_timer_sync(&hdev->cmd_timer);
1997 hci_req_unlock(hdev);
1998 return 0;
2001 /* Flush RX and TX works */
2002 flush_work(&hdev->tx_work);
2003 flush_work(&hdev->rx_work);
2005 if (hdev->discov_timeout > 0) {
2006 cancel_delayed_work(&hdev->discov_off);
2007 hdev->discov_timeout = 0;
2008 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2009 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2012 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2013 cancel_delayed_work(&hdev->service_cache);
2015 cancel_delayed_work_sync(&hdev->le_scan_disable);
2017 hci_dev_lock(hdev);
2018 hci_inquiry_cache_flush(hdev);
2019 hci_conn_hash_flush(hdev);
2020 hci_dev_unlock(hdev);
2022 hci_notify(hdev, HCI_DEV_DOWN);
2024 if (hdev->flush)
2025 hdev->flush(hdev);
2027 /* Reset device */
2028 skb_queue_purge(&hdev->cmd_q);
2029 atomic_set(&hdev->cmd_cnt, 1);
2030 if (!test_bit(HCI_RAW, &hdev->flags) &&
2031 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2032 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2033 set_bit(HCI_INIT, &hdev->flags);
2034 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2035 clear_bit(HCI_INIT, &hdev->flags);
2038 /* flush cmd work */
2039 flush_work(&hdev->cmd_work);
2041 /* Drop queues */
2042 skb_queue_purge(&hdev->rx_q);
2043 skb_queue_purge(&hdev->cmd_q);
2044 skb_queue_purge(&hdev->raw_q);
2046 /* Drop last sent command */
2047 if (hdev->sent_cmd) {
2048 del_timer_sync(&hdev->cmd_timer);
2049 kfree_skb(hdev->sent_cmd);
2050 hdev->sent_cmd = NULL;
2053 kfree_skb(hdev->recv_evt);
2054 hdev->recv_evt = NULL;
2056 /* After this point our queues are empty
2057 * and no tasks are scheduled. */
2058 hdev->close(hdev);
2060 /* Clear flags */
2061 hdev->flags = 0;
2062 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2064 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2065 if (hdev->dev_type == HCI_BREDR) {
2066 hci_dev_lock(hdev);
2067 mgmt_powered(hdev, 0);
2068 hci_dev_unlock(hdev);
2072 /* Controller radio is available but is currently powered down */
2073 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2075 memset(hdev->eir, 0, sizeof(hdev->eir));
2076 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2078 hci_req_unlock(hdev);
2080 hci_dev_put(hdev);
2081 return 0;
2084 int hci_dev_close(__u16 dev)
2086 struct hci_dev *hdev;
2087 int err;
2089 hdev = hci_dev_get(dev);
2090 if (!hdev)
2091 return -ENODEV;
2093 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2094 err = -EBUSY;
2095 goto done;
2098 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2099 cancel_delayed_work(&hdev->power_off);
2101 err = hci_dev_do_close(hdev);
2103 done:
2104 hci_dev_put(hdev);
2105 return err;
2108 int hci_dev_reset(__u16 dev)
2110 struct hci_dev *hdev;
2111 int ret = 0;
2113 hdev = hci_dev_get(dev);
2114 if (!hdev)
2115 return -ENODEV;
2117 hci_req_lock(hdev);
2119 if (!test_bit(HCI_UP, &hdev->flags)) {
2120 ret = -ENETDOWN;
2121 goto done;
2124 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2125 ret = -EBUSY;
2126 goto done;
2129 /* Drop queues */
2130 skb_queue_purge(&hdev->rx_q);
2131 skb_queue_purge(&hdev->cmd_q);
2133 hci_dev_lock(hdev);
2134 hci_inquiry_cache_flush(hdev);
2135 hci_conn_hash_flush(hdev);
2136 hci_dev_unlock(hdev);
2138 if (hdev->flush)
2139 hdev->flush(hdev);
2141 atomic_set(&hdev->cmd_cnt, 1);
2142 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2144 if (!test_bit(HCI_RAW, &hdev->flags))
2145 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2147 done:
2148 hci_req_unlock(hdev);
2149 hci_dev_put(hdev);
2150 return ret;
2153 int hci_dev_reset_stat(__u16 dev)
2155 struct hci_dev *hdev;
2156 int ret = 0;
2158 hdev = hci_dev_get(dev);
2159 if (!hdev)
2160 return -ENODEV;
2162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 ret = -EBUSY;
2164 goto done;
2167 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2169 done:
2170 hci_dev_put(hdev);
2171 return ret;
2174 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2176 struct hci_dev *hdev;
2177 struct hci_dev_req dr;
2178 int err = 0;
2180 if (copy_from_user(&dr, arg, sizeof(dr)))
2181 return -EFAULT;
2183 hdev = hci_dev_get(dr.dev_id);
2184 if (!hdev)
2185 return -ENODEV;
2187 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2188 err = -EBUSY;
2189 goto done;
2192 if (hdev->dev_type != HCI_BREDR) {
2193 err = -EOPNOTSUPP;
2194 goto done;
2197 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2198 err = -EOPNOTSUPP;
2199 goto done;
2202 switch (cmd) {
2203 case HCISETAUTH:
2204 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2205 HCI_INIT_TIMEOUT);
2206 break;
2208 case HCISETENCRYPT:
2209 if (!lmp_encrypt_capable(hdev)) {
2210 err = -EOPNOTSUPP;
2211 break;
2214 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2215 /* Auth must be enabled first */
2216 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2217 HCI_INIT_TIMEOUT);
2218 if (err)
2219 break;
2222 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2223 HCI_INIT_TIMEOUT);
2224 break;
2226 case HCISETSCAN:
2227 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2228 HCI_INIT_TIMEOUT);
2229 break;
2231 case HCISETLINKPOL:
2232 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2233 HCI_INIT_TIMEOUT);
2234 break;
2236 case HCISETLINKMODE:
2237 hdev->link_mode = ((__u16) dr.dev_opt) &
2238 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2239 break;
2241 case HCISETPTYPE:
2242 hdev->pkt_type = (__u16) dr.dev_opt;
2243 break;
2245 case HCISETACLMTU:
2246 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2247 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2248 break;
2250 case HCISETSCOMTU:
2251 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2252 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2253 break;
2255 default:
2256 err = -EINVAL;
2257 break;
2260 done:
2261 hci_dev_put(hdev);
2262 return err;
2265 int hci_get_dev_list(void __user *arg)
2267 struct hci_dev *hdev;
2268 struct hci_dev_list_req *dl;
2269 struct hci_dev_req *dr;
2270 int n = 0, size, err;
2271 __u16 dev_num;
2273 if (get_user(dev_num, (__u16 __user *) arg))
2274 return -EFAULT;
2276 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2277 return -EINVAL;
2279 size = sizeof(*dl) + dev_num * sizeof(*dr);
2281 dl = kzalloc(size, GFP_KERNEL);
2282 if (!dl)
2283 return -ENOMEM;
2285 dr = dl->dev_req;
2287 read_lock(&hci_dev_list_lock);
2288 list_for_each_entry(hdev, &hci_dev_list, list) {
2289 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2290 cancel_delayed_work(&hdev->power_off);
2292 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2293 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2295 (dr + n)->dev_id = hdev->id;
2296 (dr + n)->dev_opt = hdev->flags;
2298 if (++n >= dev_num)
2299 break;
2301 read_unlock(&hci_dev_list_lock);
2303 dl->dev_num = n;
2304 size = sizeof(*dl) + n * sizeof(*dr);
2306 err = copy_to_user(arg, dl, size);
2307 kfree(dl);
2309 return err ? -EFAULT : 0;
2312 int hci_get_dev_info(void __user *arg)
2314 struct hci_dev *hdev;
2315 struct hci_dev_info di;
2316 int err = 0;
2318 if (copy_from_user(&di, arg, sizeof(di)))
2319 return -EFAULT;
2321 hdev = hci_dev_get(di.dev_id);
2322 if (!hdev)
2323 return -ENODEV;
2325 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2326 cancel_delayed_work_sync(&hdev->power_off);
2328 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2329 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2331 strcpy(di.name, hdev->name);
2332 di.bdaddr = hdev->bdaddr;
2333 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2334 di.flags = hdev->flags;
2335 di.pkt_type = hdev->pkt_type;
2336 if (lmp_bredr_capable(hdev)) {
2337 di.acl_mtu = hdev->acl_mtu;
2338 di.acl_pkts = hdev->acl_pkts;
2339 di.sco_mtu = hdev->sco_mtu;
2340 di.sco_pkts = hdev->sco_pkts;
2341 } else {
2342 di.acl_mtu = hdev->le_mtu;
2343 di.acl_pkts = hdev->le_pkts;
2344 di.sco_mtu = 0;
2345 di.sco_pkts = 0;
2347 di.link_policy = hdev->link_policy;
2348 di.link_mode = hdev->link_mode;
2350 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2351 memcpy(&di.features, &hdev->features, sizeof(di.features));
2353 if (copy_to_user(arg, &di, sizeof(di)))
2354 err = -EFAULT;
2356 hci_dev_put(hdev);
2358 return err;
2361 /* ---- Interface to HCI drivers ---- */
2363 static int hci_rfkill_set_block(void *data, bool blocked)
2365 struct hci_dev *hdev = data;
2367 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2369 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370 return -EBUSY;
2372 if (blocked) {
2373 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2374 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2375 hci_dev_do_close(hdev);
2376 } else {
2377 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2380 return 0;
2383 static const struct rfkill_ops hci_rfkill_ops = {
2384 .set_block = hci_rfkill_set_block,
2387 static void hci_power_on(struct work_struct *work)
2389 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2390 int err;
2392 BT_DBG("%s", hdev->name);
2394 err = hci_dev_do_open(hdev);
2395 if (err < 0) {
2396 mgmt_set_powered_failed(hdev, err);
2397 return;
2400 /* During the HCI setup phase, a few error conditions are
2401 * ignored and they need to be checked now. If they are still
2402 * valid, it is important to turn the device back off.
2404 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2405 (hdev->dev_type == HCI_BREDR &&
2406 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2407 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2408 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2409 hci_dev_do_close(hdev);
2410 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2411 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2412 HCI_AUTO_OFF_TIMEOUT);
2415 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2416 mgmt_index_added(hdev);
2419 static void hci_power_off(struct work_struct *work)
2421 struct hci_dev *hdev = container_of(work, struct hci_dev,
2422 power_off.work);
2424 BT_DBG("%s", hdev->name);
2426 hci_dev_do_close(hdev);
2429 static void hci_discov_off(struct work_struct *work)
2431 struct hci_dev *hdev;
2433 hdev = container_of(work, struct hci_dev, discov_off.work);
2435 BT_DBG("%s", hdev->name);
2437 mgmt_discoverable_timeout(hdev);
2440 int hci_uuids_clear(struct hci_dev *hdev)
2442 struct bt_uuid *uuid, *tmp;
2444 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2445 list_del(&uuid->list);
2446 kfree(uuid);
2449 return 0;
2452 int hci_link_keys_clear(struct hci_dev *hdev)
2454 struct list_head *p, *n;
2456 list_for_each_safe(p, n, &hdev->link_keys) {
2457 struct link_key *key;
2459 key = list_entry(p, struct link_key, list);
2461 list_del(p);
2462 kfree(key);
2465 return 0;
2468 int hci_smp_ltks_clear(struct hci_dev *hdev)
2470 struct smp_ltk *k, *tmp;
2472 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2473 list_del(&k->list);
2474 kfree(k);
2477 return 0;
2480 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2482 struct link_key *k;
2484 list_for_each_entry(k, &hdev->link_keys, list)
2485 if (bacmp(bdaddr, &k->bdaddr) == 0)
2486 return k;
2488 return NULL;
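/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and on the local and remote
 * bonding requirements of the connection.
 */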
2491 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2492 u8 key_type, u8 old_key_type)
2494 /* Legacy key */
2495 if (key_type < 0x03)
2496 return true;
2498 /* Debug keys are insecure so don't store them persistently */
2499 if (key_type == HCI_LK_DEBUG_COMBINATION)
2500 return false;
2502 /* Changed combination key and there's no previous one */
2503 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2504 return false;
2506 /* Security mode 3 case */
2507 if (!conn)
2508 return true;
2510 /* Neither local nor remote side had no-bonding as a requirement */
2511 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2512 return true;
2514 /* Local side had dedicated bonding as requirement */
2515 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2516 return true;
2518 /* Remote side had dedicated bonding as requirement */
2519 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2520 return true;
2522 /* If none of the above criteria match, then don't store the key
2523 * persistently */
2524 return false;
2527 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2529 struct smp_ltk *k;
2531 list_for_each_entry(k, &hdev->long_term_keys, list) {
2532 if (k->ediv != ediv ||
2533 memcmp(rand, k->rand, sizeof(k->rand)))
2534 continue;
2536 return k;
2539 return NULL;
2542 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543 u8 addr_type)
2545 struct smp_ltk *k;
2547 list_for_each_entry(k, &hdev->long_term_keys, list)
2548 if (addr_type == k->bdaddr_type &&
2549 bacmp(bdaddr, &k->bdaddr) == 0)
2550 return k;
2552 return NULL;
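/* Store or update a BR/EDR link key. For genuinely new keys the
 * persistence decision is made by hci_persistent_key() and the
 * management interface is notified.
 */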
2555 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2556 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2558 struct link_key *key, *old_key;
2559 u8 old_key_type;
2560 bool persistent;
2562 old_key = hci_find_link_key(hdev, bdaddr);
2563 if (old_key) {
2564 old_key_type = old_key->type;
2565 key = old_key;
2566 } else {
2567 old_key_type = conn ? conn->key_type : 0xff;
2568 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2569 if (!key)
2570 return -ENOMEM;
2571 list_add(&key->list, &hdev->link_keys);
2574 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2576 /* Some buggy controller combinations generate a changed
2577 * combination key for legacy pairing even when there's no
2578 * previous key */
2579 if (type == HCI_LK_CHANGED_COMBINATION &&
2580 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2581 type = HCI_LK_COMBINATION;
2582 if (conn)
2583 conn->key_type = type;
2586 bacpy(&key->bdaddr, bdaddr);
2587 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2588 key->pin_len = pin_len;
2590 if (type == HCI_LK_CHANGED_COMBINATION)
2591 key->type = old_key_type;
2592 else
2593 key->type = type;
2595 if (!new_key)
2596 return 0;
2598 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2600 mgmt_new_link_key(hdev, key, persistent);
2602 if (conn)
2603 conn->flush_key = !persistent;
2605 return 0;
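/* Store or update an SMP long term (or short term) key for the given
 * address. Only keys flagged as LTK are reported to the management
 * interface.
 */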
2608 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2609 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2610 ediv, u8 rand[8])
2612 struct smp_ltk *key, *old_key;
2614 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2615 return 0;
2617 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2618 if (old_key)
2619 key = old_key;
2620 else {
2621 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2622 if (!key)
2623 return -ENOMEM;
2624 list_add(&key->list, &hdev->long_term_keys);
2627 bacpy(&key->bdaddr, bdaddr);
2628 key->bdaddr_type = addr_type;
2629 memcpy(key->val, tk, sizeof(key->val));
2630 key->authenticated = authenticated;
2631 key->ediv = ediv;
2632 key->enc_size = enc_size;
2633 key->type = type;
2634 memcpy(key->rand, rand, sizeof(key->rand));
2636 if (!new_key)
2637 return 0;
2639 if (type & HCI_SMP_LTK)
2640 mgmt_new_ltk(hdev, key, 1);
2642 return 0;
2645 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2647 struct link_key *key;
2649 key = hci_find_link_key(hdev, bdaddr);
2650 if (!key)
2651 return -ENOENT;
2653 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2655 list_del(&key->list);
2656 kfree(key);
2658 return 0;
2661 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2663 struct smp_ltk *k, *tmp;
2665 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2666 if (bacmp(bdaddr, &k->bdaddr))
2667 continue;
2669 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2671 list_del(&k->list);
2672 kfree(k);
2675 return 0;
2678 /* HCI command timer function */
2679 static void hci_cmd_timeout(unsigned long arg)
2681 struct hci_dev *hdev = (void *) arg;
2683 if (hdev->sent_cmd) {
2684 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2685 u16 opcode = __le16_to_cpu(sent->opcode);
2687 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2688 } else {
2689 BT_ERR("%s command tx timeout", hdev->name);
2692 atomic_set(&hdev->cmd_cnt, 1);
2693 queue_work(hdev->workqueue, &hdev->cmd_work);
2696 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2697 bdaddr_t *bdaddr)
2699 struct oob_data *data;
2701 list_for_each_entry(data, &hdev->remote_oob_data, list)
2702 if (bacmp(bdaddr, &data->bdaddr) == 0)
2703 return data;
2705 return NULL;
2708 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2710 struct oob_data *data;
2712 data = hci_find_remote_oob_data(hdev, bdaddr);
2713 if (!data)
2714 return -ENOENT;
2716 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2718 list_del(&data->list);
2719 kfree(data);
2721 return 0;
2724 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2726 struct oob_data *data, *n;
2728 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2729 list_del(&data->list);
2730 kfree(data);
2733 return 0;
2736 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2737 u8 *randomizer)
2739 struct oob_data *data;
2741 data = hci_find_remote_oob_data(hdev, bdaddr);
2743 if (!data) {
2744 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2745 if (!data)
2746 return -ENOMEM;
2748 bacpy(&data->bdaddr, bdaddr);
2749 list_add(&data->list, &hdev->remote_oob_data);
2752 memcpy(data->hash, hash, sizeof(data->hash));
2753 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2755 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2757 return 0;
2760 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2761 bdaddr_t *bdaddr, u8 type)
2763 struct bdaddr_list *b;
2765 list_for_each_entry(b, &hdev->blacklist, list) {
2766 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2767 return b;
2770 return NULL;
2773 int hci_blacklist_clear(struct hci_dev *hdev)
2775 struct list_head *p, *n;
2777 list_for_each_safe(p, n, &hdev->blacklist) {
2778 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2780 list_del(p);
2781 kfree(b);
2784 return 0;
2787 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2789 struct bdaddr_list *entry;
2791 if (!bacmp(bdaddr, BDADDR_ANY))
2792 return -EBADF;
2794 if (hci_blacklist_lookup(hdev, bdaddr, type))
2795 return -EEXIST;
2797 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2798 if (!entry)
2799 return -ENOMEM;
2801 bacpy(&entry->bdaddr, bdaddr);
2802 entry->bdaddr_type = type;
2804 list_add(&entry->list, &hdev->blacklist);
2806 return mgmt_device_blocked(hdev, bdaddr, type);
2809 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2811 struct bdaddr_list *entry;
2813 if (!bacmp(bdaddr, BDADDR_ANY))
2814 return hci_blacklist_clear(hdev);
2816 entry = hci_blacklist_lookup(hdev, bdaddr, type);
2817 if (!entry)
2818 return -ENOENT;
2820 list_del(&entry->list);
2821 kfree(entry);
2823 return mgmt_device_unblocked(hdev, bdaddr, type);
2826 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2828 if (status) {
2829 BT_ERR("Failed to start inquiry: status %d", status);
2831 hci_dev_lock(hdev);
2832 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2833 hci_dev_unlock(hdev);
2834 return;
2838 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2840 /* General inquiry access code (GIAC) */
2841 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2842 struct hci_request req;
2843 struct hci_cp_inquiry cp;
2844 int err;
2846 if (status) {
2847 BT_ERR("Failed to disable LE scanning: status %d", status);
2848 return;
2851 switch (hdev->discovery.type) {
2852 case DISCOV_TYPE_LE:
2853 hci_dev_lock(hdev);
2854 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2855 hci_dev_unlock(hdev);
2856 break;
2858 case DISCOV_TYPE_INTERLEAVED:
2859 hci_req_init(&req, hdev);
2861 memset(&cp, 0, sizeof(cp));
2862 memcpy(&cp.lap, lap, sizeof(cp.lap));
2863 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2864 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2866 hci_dev_lock(hdev);
2868 hci_inquiry_cache_flush(hdev);
2870 err = hci_req_run(&req, inquiry_complete);
2871 if (err) {
2872 BT_ERR("Inquiry request failed: err %d", err);
2873 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2876 hci_dev_unlock(hdev);
2877 break;
2881 static void le_scan_disable_work(struct work_struct *work)
2883 struct hci_dev *hdev = container_of(work, struct hci_dev,
2884 le_scan_disable.work);
2885 struct hci_cp_le_set_scan_enable cp;
2886 struct hci_request req;
2887 int err;
2889 BT_DBG("%s", hdev->name);
2891 hci_req_init(&req, hdev);
2893 memset(&cp, 0, sizeof(cp));
2894 cp.enable = LE_SCAN_DISABLE;
2895 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2897 err = hci_req_run(&req, le_scan_disable_work_complete);
2898 if (err)
2899 BT_ERR("Disable LE scanning request failed: err %d", err);
2902 /* Alloc HCI device */
2903 struct hci_dev *hci_alloc_dev(void)
2905 struct hci_dev *hdev;
2907 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2908 if (!hdev)
2909 return NULL;
2911 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2912 hdev->esco_type = (ESCO_HV1);
2913 hdev->link_mode = (HCI_LM_ACCEPT);
2914 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2915 hdev->io_capability = 0x03; /* No Input No Output */
2916 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2917 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2919 hdev->sniff_max_interval = 800;
2920 hdev->sniff_min_interval = 80;
2922 hdev->le_scan_interval = 0x0060;
2923 hdev->le_scan_window = 0x0030;
2924 hdev->le_conn_min_interval = 0x0028;
2925 hdev->le_conn_max_interval = 0x0038;
2927 mutex_init(&hdev->lock);
2928 mutex_init(&hdev->req_lock);
2930 INIT_LIST_HEAD(&hdev->mgmt_pending);
2931 INIT_LIST_HEAD(&hdev->blacklist);
2932 INIT_LIST_HEAD(&hdev->uuids);
2933 INIT_LIST_HEAD(&hdev->link_keys);
2934 INIT_LIST_HEAD(&hdev->long_term_keys);
2935 INIT_LIST_HEAD(&hdev->remote_oob_data);
2936 INIT_LIST_HEAD(&hdev->conn_hash.list);
2938 INIT_WORK(&hdev->rx_work, hci_rx_work);
2939 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2940 INIT_WORK(&hdev->tx_work, hci_tx_work);
2941 INIT_WORK(&hdev->power_on, hci_power_on);
2943 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2944 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2945 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2947 skb_queue_head_init(&hdev->rx_q);
2948 skb_queue_head_init(&hdev->cmd_q);
2949 skb_queue_head_init(&hdev->raw_q);
2951 init_waitqueue_head(&hdev->req_wait_q);
2953 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2955 hci_init_sysfs(hdev);
2956 discovery_init(hdev);
2958 return hdev;
2960 EXPORT_SYMBOL(hci_alloc_dev);
2962 /* Free HCI device */
2963 void hci_free_dev(struct hci_dev *hdev)
2965 /* will free via device release */
2966 put_device(&hdev->dev);
2968 EXPORT_SYMBOL(hci_free_dev);
2970 /* Register HCI device */
2971 int hci_register_dev(struct hci_dev *hdev)
2973 int id, error;
2975 if (!hdev->open || !hdev->close)
2976 return -EINVAL;
2978 /* Do not allow HCI_AMP devices to register at index 0,
2979 * so the index can be used as the AMP controller ID.
2981 switch (hdev->dev_type) {
2982 case HCI_BREDR:
2983 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2984 break;
2985 case HCI_AMP:
2986 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2987 break;
2988 default:
2989 return -EINVAL;
2992 if (id < 0)
2993 return id;
2995 sprintf(hdev->name, "hci%d", id);
2996 hdev->id = id;
2998 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3000 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3001 WQ_MEM_RECLAIM, 1, hdev->name);
3002 if (!hdev->workqueue) {
3003 error = -ENOMEM;
3004 goto err;
3007 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3008 WQ_MEM_RECLAIM, 1, hdev->name);
3009 if (!hdev->req_workqueue) {
3010 destroy_workqueue(hdev->workqueue);
3011 error = -ENOMEM;
3012 goto err;
3015 if (!IS_ERR_OR_NULL(bt_debugfs))
3016 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3018 dev_set_name(&hdev->dev, "%s", hdev->name);
3020 error = device_add(&hdev->dev);
3021 if (error < 0)
3022 goto err_wqueue;
3024 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3025 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3026 hdev);
3027 if (hdev->rfkill) {
3028 if (rfkill_register(hdev->rfkill) < 0) {
3029 rfkill_destroy(hdev->rfkill);
3030 hdev->rfkill = NULL;
3034 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3035 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3037 set_bit(HCI_SETUP, &hdev->dev_flags);
3038 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3040 if (hdev->dev_type == HCI_BREDR) {
3041 /* Assume BR/EDR support until proven otherwise (such as
3042 * through reading supported features during init).
3044 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3047 write_lock(&hci_dev_list_lock);
3048 list_add(&hdev->list, &hci_dev_list);
3049 write_unlock(&hci_dev_list_lock);
3051 hci_notify(hdev, HCI_DEV_REG);
3052 hci_dev_hold(hdev);
3054 queue_work(hdev->req_workqueue, &hdev->power_on);
3056 return id;
3058 err_wqueue:
3059 destroy_workqueue(hdev->workqueue);
3060 destroy_workqueue(hdev->req_workqueue);
3061 err:
3062 ida_simple_remove(&hci_index_ida, hdev->id);
3064 return error;
3066 EXPORT_SYMBOL(hci_register_dev);
3068 /* Unregister HCI device */
3069 void hci_unregister_dev(struct hci_dev *hdev)
3071 int i, id;
3073 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3075 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3077 id = hdev->id;
3079 write_lock(&hci_dev_list_lock);
3080 list_del(&hdev->list);
3081 write_unlock(&hci_dev_list_lock);
3083 hci_dev_do_close(hdev);
3085 for (i = 0; i < NUM_REASSEMBLY; i++)
3086 kfree_skb(hdev->reassembly[i]);
3088 cancel_work_sync(&hdev->power_on);
3090 if (!test_bit(HCI_INIT, &hdev->flags) &&
3091 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3092 hci_dev_lock(hdev);
3093 mgmt_index_removed(hdev);
3094 hci_dev_unlock(hdev);
3097 /* mgmt_index_removed should take care of emptying the
3098 * pending list */
3099 BUG_ON(!list_empty(&hdev->mgmt_pending));
3101 hci_notify(hdev, HCI_DEV_UNREG);
3103 if (hdev->rfkill) {
3104 rfkill_unregister(hdev->rfkill);
3105 rfkill_destroy(hdev->rfkill);
3108 device_del(&hdev->dev);
3110 debugfs_remove_recursive(hdev->debugfs);
3112 destroy_workqueue(hdev->workqueue);
3113 destroy_workqueue(hdev->req_workqueue);
3115 hci_dev_lock(hdev);
3116 hci_blacklist_clear(hdev);
3117 hci_uuids_clear(hdev);
3118 hci_link_keys_clear(hdev);
3119 hci_smp_ltks_clear(hdev);
3120 hci_remote_oob_data_clear(hdev);
3121 hci_dev_unlock(hdev);
3123 hci_dev_put(hdev);
3125 ida_simple_remove(&hci_index_ida, id);
3127 EXPORT_SYMBOL(hci_unregister_dev);
3129 /* Suspend HCI device */
3130 int hci_suspend_dev(struct hci_dev *hdev)
3132 hci_notify(hdev, HCI_DEV_SUSPEND);
3133 return 0;
3135 EXPORT_SYMBOL(hci_suspend_dev);
3137 /* Resume HCI device */
3138 int hci_resume_dev(struct hci_dev *hdev)
3140 hci_notify(hdev, HCI_DEV_RESUME);
3141 return 0;
3143 EXPORT_SYMBOL(hci_resume_dev);
3145 /* Receive frame from HCI drivers */
3146 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3148 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3149 && !test_bit(HCI_INIT, &hdev->flags))) {
3150 kfree_skb(skb);
3151 return -ENXIO;
3154 /* Incoming skb */
3155 bt_cb(skb)->incoming = 1;
3157 /* Time stamp */
3158 __net_timestamp(skb);
3160 skb_queue_tail(&hdev->rx_q, skb);
3161 queue_work(hdev->workqueue, &hdev->rx_work);
3163 return 0;
3165 EXPORT_SYMBOL(hci_recv_frame);
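/* Reassemble a fragmented HCI packet. One skb per reassembly slot is
 * kept in hdev->reassembly[]; scb->expect tracks how many bytes are
 * still missing. A completed frame is handed to hci_recv_frame() and
 * the number of unconsumed input bytes is returned.
 */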
3167 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3168 int count, __u8 index)
3170 int len = 0;
3171 int hlen = 0;
3172 int remain = count;
3173 struct sk_buff *skb;
3174 struct bt_skb_cb *scb;
3176 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3177 index >= NUM_REASSEMBLY)
3178 return -EILSEQ;
3180 skb = hdev->reassembly[index];
3182 if (!skb) {
3183 switch (type) {
3184 case HCI_ACLDATA_PKT:
3185 len = HCI_MAX_FRAME_SIZE;
3186 hlen = HCI_ACL_HDR_SIZE;
3187 break;
3188 case HCI_EVENT_PKT:
3189 len = HCI_MAX_EVENT_SIZE;
3190 hlen = HCI_EVENT_HDR_SIZE;
3191 break;
3192 case HCI_SCODATA_PKT:
3193 len = HCI_MAX_SCO_SIZE;
3194 hlen = HCI_SCO_HDR_SIZE;
3195 break;
3198 skb = bt_skb_alloc(len, GFP_ATOMIC);
3199 if (!skb)
3200 return -ENOMEM;
3202 scb = (void *) skb->cb;
3203 scb->expect = hlen;
3204 scb->pkt_type = type;
3206 hdev->reassembly[index] = skb;
3209 while (count) {
3210 scb = (void *) skb->cb;
3211 len = min_t(uint, scb->expect, count);
3213 memcpy(skb_put(skb, len), data, len);
3215 count -= len;
3216 data += len;
3217 scb->expect -= len;
3218 remain = count;
3220 switch (type) {
3221 case HCI_EVENT_PKT:
3222 if (skb->len == HCI_EVENT_HDR_SIZE) {
3223 struct hci_event_hdr *h = hci_event_hdr(skb);
3224 scb->expect = h->plen;
3226 if (skb_tailroom(skb) < scb->expect) {
3227 kfree_skb(skb);
3228 hdev->reassembly[index] = NULL;
3229 return -ENOMEM;
3232 break;
3234 case HCI_ACLDATA_PKT:
3235 if (skb->len == HCI_ACL_HDR_SIZE) {
3236 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3237 scb->expect = __le16_to_cpu(h->dlen);
3239 if (skb_tailroom(skb) < scb->expect) {
3240 kfree_skb(skb);
3241 hdev->reassembly[index] = NULL;
3242 return -ENOMEM;
3245 break;
3247 case HCI_SCODATA_PKT:
3248 if (skb->len == HCI_SCO_HDR_SIZE) {
3249 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3250 scb->expect = h->dlen;
3252 if (skb_tailroom(skb) < scb->expect) {
3253 kfree_skb(skb);
3254 hdev->reassembly[index] = NULL;
3255 return -ENOMEM;
3258 break;
3261 if (scb->expect == 0) {
3262 /* Complete frame */
3264 bt_cb(skb)->pkt_type = type;
3265 hci_recv_frame(hdev, skb);
3267 hdev->reassembly[index] = NULL;
3268 return remain;
3272 return remain;
3275 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3277 int rem = 0;
3279 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3280 return -EILSEQ;
3282 while (count) {
3283 rem = hci_reassembly(hdev, type, data, count, type - 1);
3284 if (rem < 0)
3285 return rem;
3287 data += (count - rem);
3288 count = rem;
3291 return rem;
3293 EXPORT_SYMBOL(hci_recv_fragment);
3295 #define STREAM_REASSEMBLY 0
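/* Reassemble frames from a byte stream in which every frame starts
 * with a one byte packet type indicator.
 */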
3297 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3299 int type;
3300 int rem = 0;
3302 while (count) {
3303 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3305 if (!skb) {
3306 struct { char type; } *pkt;
3308 /* Start of the frame */
3309 pkt = data;
3310 type = pkt->type;
3312 data++;
3313 count--;
3314 } else
3315 type = bt_cb(skb)->pkt_type;
3317 rem = hci_reassembly(hdev, type, data, count,
3318 STREAM_REASSEMBLY);
3319 if (rem < 0)
3320 return rem;
3322 data += (count - rem);
3323 count = rem;
3326 return rem;
3328 EXPORT_SYMBOL(hci_recv_stream_fragment);
3330 /* ---- Interface to upper protocols ---- */
3332 int hci_register_cb(struct hci_cb *cb)
3334 BT_DBG("%p name %s", cb, cb->name);
3336 write_lock(&hci_cb_list_lock);
3337 list_add(&cb->list, &hci_cb_list);
3338 write_unlock(&hci_cb_list_lock);
3340 return 0;
3342 EXPORT_SYMBOL(hci_register_cb);
3344 int hci_unregister_cb(struct hci_cb *cb)
3346 BT_DBG("%p name %s", cb, cb->name);
3348 write_lock(&hci_cb_list_lock);
3349 list_del(&cb->list);
3350 write_unlock(&hci_cb_list_lock);
3352 return 0;
3354 EXPORT_SYMBOL(hci_unregister_cb);
3356 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3358 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3360 /* Time stamp */
3361 __net_timestamp(skb);
3363 /* Send copy to monitor */
3364 hci_send_to_monitor(hdev, skb);
3366 if (atomic_read(&hdev->promisc)) {
3367 /* Send copy to the sockets */
3368 hci_send_to_sock(hdev, skb);
3371 /* Get rid of skb owner, prior to sending to the driver. */
3372 skb_orphan(skb);
3374 if (hdev->send(hdev, skb) < 0)
3375 BT_ERR("%s sending frame failed", hdev->name);
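/* HCI request helpers: commands are first collected on req->cmd_q and
 * later spliced onto hdev->cmd_q as one unit, with the completion
 * callback attached to the last command of the request.
 */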
3378 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3380 skb_queue_head_init(&req->cmd_q);
3381 req->hdev = hdev;
3382 req->err = 0;
3385 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3387 struct hci_dev *hdev = req->hdev;
3388 struct sk_buff *skb;
3389 unsigned long flags;
3391 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3393 /* If an error occurred during request building, remove all HCI
3394 * commands queued on the HCI request queue.
3396 if (req->err) {
3397 skb_queue_purge(&req->cmd_q);
3398 return req->err;
3401 /* Do not allow empty requests */
3402 if (skb_queue_empty(&req->cmd_q))
3403 return -ENODATA;
3405 skb = skb_peek_tail(&req->cmd_q);
3406 bt_cb(skb)->req.complete = complete;
3408 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3409 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3410 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3412 queue_work(hdev->workqueue, &hdev->cmd_work);
3414 return 0;
3417 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3418 u32 plen, const void *param)
3420 int len = HCI_COMMAND_HDR_SIZE + plen;
3421 struct hci_command_hdr *hdr;
3422 struct sk_buff *skb;
3424 skb = bt_skb_alloc(len, GFP_ATOMIC);
3425 if (!skb)
3426 return NULL;
3428 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3429 hdr->opcode = cpu_to_le16(opcode);
3430 hdr->plen = plen;
3432 if (plen)
3433 memcpy(skb_put(skb, plen), param, plen);
3435 BT_DBG("skb len %d", skb->len);
3437 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3439 return skb;
3442 /* Send HCI command */
3443 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3444 const void *param)
3446 struct sk_buff *skb;
3448 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3450 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3451 if (!skb) {
3452 BT_ERR("%s no memory for command", hdev->name);
3453 return -ENOMEM;
3456 /* Stand-alone HCI commands must be flagged as
3457 * single-command requests.
3459 bt_cb(skb)->req.start = true;
3461 skb_queue_tail(&hdev->cmd_q, skb);
3462 queue_work(hdev->workqueue, &hdev->cmd_work);
3464 return 0;
3467 /* Queue a command to an asynchronous HCI request */
3468 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3469 const void *param, u8 event)
3471 struct hci_dev *hdev = req->hdev;
3472 struct sk_buff *skb;
3474 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3476 /* If an error occurred during request building, there is no point in
3477 * queueing the HCI command. We can simply return.
3479 if (req->err)
3480 return;
3482 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3483 if (!skb) {
3484 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3485 hdev->name, opcode);
3486 req->err = -ENOMEM;
3487 return;
3490 if (skb_queue_empty(&req->cmd_q))
3491 bt_cb(skb)->req.start = true;
3493 bt_cb(skb)->req.event = event;
3495 skb_queue_tail(&req->cmd_q, skb);
3498 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3499 const void *param)
3501 hci_req_add_ev(req, opcode, plen, param, 0);
3504 /* Get data from the previously sent command */
3505 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3507 struct hci_command_hdr *hdr;
3509 if (!hdev->sent_cmd)
3510 return NULL;
3512 hdr = (void *) hdev->sent_cmd->data;
3514 if (hdr->opcode != cpu_to_le16(opcode))
3515 return NULL;
3517 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3519 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3522 /* Send ACL data */
3523 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3525 struct hci_acl_hdr *hdr;
3526 int len = skb->len;
3528 skb_push(skb, HCI_ACL_HDR_SIZE);
3529 skb_reset_transport_header(skb);
3530 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3531 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3532 hdr->dlen = cpu_to_le16(len);
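/* Queue an ACL packet on a channel. A fragmented skb (frag_list) is
 * split up: the head keeps the caller's flags, the remaining
 * fragments are marked ACL_CONT, and all of them are queued
 * atomically.
 */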
3535 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3536 struct sk_buff *skb, __u16 flags)
3538 struct hci_conn *conn = chan->conn;
3539 struct hci_dev *hdev = conn->hdev;
3540 struct sk_buff *list;
3542 skb->len = skb_headlen(skb);
3543 skb->data_len = 0;
3545 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3547 switch (hdev->dev_type) {
3548 case HCI_BREDR:
3549 hci_add_acl_hdr(skb, conn->handle, flags);
3550 break;
3551 case HCI_AMP:
3552 hci_add_acl_hdr(skb, chan->handle, flags);
3553 break;
3554 default:
3555 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3556 return;
3559 list = skb_shinfo(skb)->frag_list;
3560 if (!list) {
3561 /* Non-fragmented */
3562 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3564 skb_queue_tail(queue, skb);
3565 } else {
3566 /* Fragmented */
3567 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3569 skb_shinfo(skb)->frag_list = NULL;
3571 /* Queue all fragments atomically */
3572 spin_lock(&queue->lock);
3574 __skb_queue_tail(queue, skb);
3576 flags &= ~ACL_START;
3577 flags |= ACL_CONT;
3578 do {
3579 skb = list; list = list->next;
3581 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3582 hci_add_acl_hdr(skb, conn->handle, flags);
3584 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3586 __skb_queue_tail(queue, skb);
3587 } while (list);
3589 spin_unlock(&queue->lock);
3593 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3595 struct hci_dev *hdev = chan->conn->hdev;
3597 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3599 hci_queue_acl(chan, &chan->data_q, skb, flags);
3601 queue_work(hdev->workqueue, &hdev->tx_work);
3604 /* Send SCO data */
3605 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3607 struct hci_dev *hdev = conn->hdev;
3608 struct hci_sco_hdr hdr;
3610 BT_DBG("%s len %d", hdev->name, skb->len);
3612 hdr.handle = cpu_to_le16(conn->handle);
3613 hdr.dlen = skb->len;
3615 skb_push(skb, HCI_SCO_HDR_SIZE);
3616 skb_reset_transport_header(skb);
3617 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3619 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3621 skb_queue_tail(&conn->data_q, skb);
3622 queue_work(hdev->workqueue, &hdev->tx_work);
3625 /* ---- HCI TX task (outgoing data) ---- */
3627 /* HCI Connection scheduler */
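/* Pick the connection of the given link type with the fewest
 * outstanding packets and compute a fair per-connection quote from
 * the controller's free buffer count.
 */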
3628 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3629 int *quote)
3631 struct hci_conn_hash *h = &hdev->conn_hash;
3632 struct hci_conn *conn = NULL, *c;
3633 unsigned int num = 0, min = ~0;
3635 /* We don't have to lock the device here. Connections are always
3636 * added and removed with TX task disabled. */
3638 rcu_read_lock();
3640 list_for_each_entry_rcu(c, &h->list, list) {
3641 if (c->type != type || skb_queue_empty(&c->data_q))
3642 continue;
3644 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3645 continue;
3647 num++;
3649 if (c->sent < min) {
3650 min = c->sent;
3651 conn = c;
3654 if (hci_conn_num(hdev, type) == num)
3655 break;
3658 rcu_read_unlock();
3660 if (conn) {
3661 int cnt, q;
3663 switch (conn->type) {
3664 case ACL_LINK:
3665 cnt = hdev->acl_cnt;
3666 break;
3667 case SCO_LINK:
3668 case ESCO_LINK:
3669 cnt = hdev->sco_cnt;
3670 break;
3671 case LE_LINK:
3672 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3673 break;
3674 default:
3675 cnt = 0;
3676 BT_ERR("Unknown link type");
3679 q = cnt / num;
3680 *quote = q ? q : 1;
3681 } else
3682 *quote = 0;
3684 BT_DBG("conn %p quote %d", conn, *quote);
3685 return conn;
3688 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3690 struct hci_conn_hash *h = &hdev->conn_hash;
3691 struct hci_conn *c;
3693 BT_ERR("%s link tx timeout", hdev->name);
3695 rcu_read_lock();
3697 /* Kill stalled connections */
3698 list_for_each_entry_rcu(c, &h->list, list) {
3699 if (c->type == type && c->sent) {
3700 BT_ERR("%s killing stalled connection %pMR",
3701 hdev->name, &c->dst);
3702 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3706 rcu_read_unlock();
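/* Pick the channel to service next: among connections of the given
 * type, prefer the highest queued skb priority and, within that
 * priority, the connection with the fewest outstanding packets.
 */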
3709 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3710 int *quote)
3712 struct hci_conn_hash *h = &hdev->conn_hash;
3713 struct hci_chan *chan = NULL;
3714 unsigned int num = 0, min = ~0, cur_prio = 0;
3715 struct hci_conn *conn;
3716 int cnt, q, conn_num = 0;
3718 BT_DBG("%s", hdev->name);
3720 rcu_read_lock();
3722 list_for_each_entry_rcu(conn, &h->list, list) {
3723 struct hci_chan *tmp;
3725 if (conn->type != type)
3726 continue;
3728 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3729 continue;
3731 conn_num++;
3733 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3734 struct sk_buff *skb;
3736 if (skb_queue_empty(&tmp->data_q))
3737 continue;
3739 skb = skb_peek(&tmp->data_q);
3740 if (skb->priority < cur_prio)
3741 continue;
3743 if (skb->priority > cur_prio) {
3744 num = 0;
3745 min = ~0;
3746 cur_prio = skb->priority;
3749 num++;
3751 if (conn->sent < min) {
3752 min = conn->sent;
3753 chan = tmp;
3757 if (hci_conn_num(hdev, type) == conn_num)
3758 break;
3761 rcu_read_unlock();
3763 if (!chan)
3764 return NULL;
3766 switch (chan->conn->type) {
3767 case ACL_LINK:
3768 cnt = hdev->acl_cnt;
3769 break;
3770 case AMP_LINK:
3771 cnt = hdev->block_cnt;
3772 break;
3773 case SCO_LINK:
3774 case ESCO_LINK:
3775 cnt = hdev->sco_cnt;
3776 break;
3777 case LE_LINK:
3778 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3779 break;
3780 default:
3781 cnt = 0;
3782 BT_ERR("Unknown link type");
3785 q = cnt / num;
3786 *quote = q ? q : 1;
3787 BT_DBG("chan %p quote %d", chan, *quote);
3788 return chan;
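/* After a scheduling round, raise the priority of queued data on
 * channels that did not get to send anything, so that they are not
 * starved indefinitely.
 */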
3791 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3793 struct hci_conn_hash *h = &hdev->conn_hash;
3794 struct hci_conn *conn;
3795 int num = 0;
3797 BT_DBG("%s", hdev->name);
3799 rcu_read_lock();
3801 list_for_each_entry_rcu(conn, &h->list, list) {
3802 struct hci_chan *chan;
3804 if (conn->type != type)
3805 continue;
3807 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3808 continue;
3810 num++;
3812 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3813 struct sk_buff *skb;
3815 if (chan->sent) {
3816 chan->sent = 0;
3817 continue;
3820 if (skb_queue_empty(&chan->data_q))
3821 continue;
3823 skb = skb_peek(&chan->data_q);
3824 if (skb->priority >= HCI_PRIO_MAX - 1)
3825 continue;
3827 skb->priority = HCI_PRIO_MAX - 1;
3829 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3830 skb->priority);
3833 if (hci_conn_num(hdev, type) == num)
3834 break;
3837 rcu_read_unlock();
3841 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3843 /* Calculate count of blocks used by this packet */
3844 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3847 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3849 if (!test_bit(HCI_RAW, &hdev->flags)) {
3850 /* ACL tx timeout must be longer than maximum
3851 * link supervision timeout (40.9 seconds) */
3852 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3853 HCI_ACL_TX_TIMEOUT))
3854 hci_link_tx_to(hdev, ACL_LINK);
3858 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3860 unsigned int cnt = hdev->acl_cnt;
3861 struct hci_chan *chan;
3862 struct sk_buff *skb;
3863 int quote;
3865 __check_timeout(hdev, cnt);
3867 while (hdev->acl_cnt &&
3868 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3869 u32 priority = (skb_peek(&chan->data_q))->priority;
3870 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3871 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3872 skb->len, skb->priority);
3874 /* Stop if priority has changed */
3875 if (skb->priority < priority)
3876 break;
3878 skb = skb_dequeue(&chan->data_q);
3880 hci_conn_enter_active_mode(chan->conn,
3881 bt_cb(skb)->force_active);
3883 hci_send_frame(hdev, skb);
3884 hdev->acl_last_tx = jiffies;
3886 hdev->acl_cnt--;
3887 chan->sent++;
3888 chan->conn->sent++;
3892 if (cnt != hdev->acl_cnt)
3893 hci_prio_recalculate(hdev, ACL_LINK);
3896 static void hci_sched_acl_blk(struct hci_dev *hdev)
3898 unsigned int cnt = hdev->block_cnt;
3899 struct hci_chan *chan;
3900 struct sk_buff *skb;
3901 int quote;
3902 u8 type;
3904 __check_timeout(hdev, cnt);
3906 BT_DBG("%s", hdev->name);
3908 if (hdev->dev_type == HCI_AMP)
3909 type = AMP_LINK;
3910 else
3911 type = ACL_LINK;
3913 while (hdev->block_cnt > 0 &&
3914 (chan = hci_chan_sent(hdev, type, &quote))) {
3915 u32 priority = (skb_peek(&chan->data_q))->priority;
3916 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3917 int blocks;
3919 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3920 skb->len, skb->priority);
3922 /* Stop if priority has changed */
3923 if (skb->priority < priority)
3924 break;
3926 skb = skb_dequeue(&chan->data_q);
3928 blocks = __get_blocks(hdev, skb);
3929 if (blocks > hdev->block_cnt)
3930 return;
3932 hci_conn_enter_active_mode(chan->conn,
3933 bt_cb(skb)->force_active);
3935 hci_send_frame(hdev, skb);
3936 hdev->acl_last_tx = jiffies;
3938 hdev->block_cnt -= blocks;
3939 quote -= blocks;
3941 chan->sent += blocks;
3942 chan->conn->sent += blocks;
3946 if (cnt != hdev->block_cnt)
3947 hci_prio_recalculate(hdev, type);
3950 static void hci_sched_acl(struct hci_dev *hdev)
3952 BT_DBG("%s", hdev->name);
3954 /* No ACL link over BR/EDR controller */
3955 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3956 return;
3958 /* No AMP link over AMP controller */
3959 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3960 return;
3962 switch (hdev->flow_ctl_mode) {
3963 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3964 hci_sched_acl_pkt(hdev);
3965 break;
3967 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3968 hci_sched_acl_blk(hdev);
3969 break;
3973 /* Schedule SCO */
3974 static void hci_sched_sco(struct hci_dev *hdev)
3976 struct hci_conn *conn;
3977 struct sk_buff *skb;
3978 int quote;
3980 BT_DBG("%s", hdev->name);
3982 if (!hci_conn_num(hdev, SCO_LINK))
3983 return;
3985 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3986 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3987 BT_DBG("skb %p len %d", skb, skb->len);
3988 hci_send_frame(hdev, skb);
3990 conn->sent++;
3991 if (conn->sent == ~0)
3992 conn->sent = 0;
3997 static void hci_sched_esco(struct hci_dev *hdev)
3999 struct hci_conn *conn;
4000 struct sk_buff *skb;
4001 int quote;
4003 BT_DBG("%s", hdev->name);
4005 if (!hci_conn_num(hdev, ESCO_LINK))
4006 return;
4008 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4009 &quote))) {
4010 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4011 BT_DBG("skb %p len %d", skb, skb->len);
4012 hci_send_frame(hdev, skb);
4014 conn->sent++;
4015 if (conn->sent == ~0)
4016 conn->sent = 0;
4021 static void hci_sched_le(struct hci_dev *hdev)
4023 struct hci_chan *chan;
4024 struct sk_buff *skb;
4025 int quote, cnt, tmp;
4027 BT_DBG("%s", hdev->name);
4029 if (!hci_conn_num(hdev, LE_LINK))
4030 return;
4032 if (!test_bit(HCI_RAW, &hdev->flags)) {
4033 /* LE tx timeout must be longer than maximum
4034 * link supervision timeout (40.9 seconds) */
4035 if (!hdev->le_cnt && hdev->le_pkts &&
4036 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4037 hci_link_tx_to(hdev, LE_LINK);
4040 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4041 tmp = cnt;
4042 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4043 u32 priority = (skb_peek(&chan->data_q))->priority;
4044 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4045 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4046 skb->len, skb->priority);
4048 /* Stop if priority has changed */
4049 if (skb->priority < priority)
4050 break;
4052 skb = skb_dequeue(&chan->data_q);
4054 hci_send_frame(hdev, skb);
4055 hdev->le_last_tx = jiffies;
4057 cnt--;
4058 chan->sent++;
4059 chan->conn->sent++;
4063 if (hdev->le_pkts)
4064 hdev->le_cnt = cnt;
4065 else
4066 hdev->acl_cnt = cnt;
4068 if (cnt != tmp)
4069 hci_prio_recalculate(hdev, LE_LINK);
4072 static void hci_tx_work(struct work_struct *work)
4074 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4075 struct sk_buff *skb;
4077 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4078 hdev->sco_cnt, hdev->le_cnt);
4080 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4081 /* Schedule queues and send stuff to HCI driver */
4082 hci_sched_acl(hdev);
4083 hci_sched_sco(hdev);
4084 hci_sched_esco(hdev);
4085 hci_sched_le(hdev);
4088 /* Send next queued raw (unknown type) packet */
4089 while ((skb = skb_dequeue(&hdev->raw_q)))
4090 hci_send_frame(hdev, skb);
4093 /* ----- HCI RX task (incoming data processing) ----- */
4095 /* ACL data packet */
4096 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4098 struct hci_acl_hdr *hdr = (void *) skb->data;
4099 struct hci_conn *conn;
4100 __u16 handle, flags;
4102 skb_pull(skb, HCI_ACL_HDR_SIZE);
4104 handle = __le16_to_cpu(hdr->handle);
4105 flags = hci_flags(handle);
4106 handle = hci_handle(handle);
4108 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4109 handle, flags);
4111 hdev->stat.acl_rx++;
4113 hci_dev_lock(hdev);
4114 conn = hci_conn_hash_lookup_handle(hdev, handle);
4115 hci_dev_unlock(hdev);
4117 if (conn) {
4118 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4120 /* Send to upper protocol */
4121 l2cap_recv_acldata(conn, skb, flags);
4122 return;
4123 } else {
4124 BT_ERR("%s ACL packet for unknown connection handle %d",
4125 hdev->name, handle);
4128 kfree_skb(skb);
4131 /* SCO data packet */
4132 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4134 struct hci_sco_hdr *hdr = (void *) skb->data;
4135 struct hci_conn *conn;
4136 __u16 handle;
4138 skb_pull(skb, HCI_SCO_HDR_SIZE);
4140 handle = __le16_to_cpu(hdr->handle);
4142 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4144 hdev->stat.sco_rx++;
4146 hci_dev_lock(hdev);
4147 conn = hci_conn_hash_lookup_handle(hdev, handle);
4148 hci_dev_unlock(hdev);
4150 if (conn) {
4151 /* Send to upper protocol */
4152 sco_recv_scodata(conn, skb);
4153 return;
4154 } else {
4155 BT_ERR("%s SCO packet for unknown connection handle %d",
4156 hdev->name, handle);
4159 kfree_skb(skb);
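/* A request is complete when the next command on the queue starts a
 * new request (or the command queue is empty).
 */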
4162 static bool hci_req_is_complete(struct hci_dev *hdev)
4164 struct sk_buff *skb;
4166 skb = skb_peek(&hdev->cmd_q);
4167 if (!skb)
4168 return true;
4170 return bt_cb(skb)->req.start;
4173 static void hci_resend_last(struct hci_dev *hdev)
4175 struct hci_command_hdr *sent;
4176 struct sk_buff *skb;
4177 u16 opcode;
4179 if (!hdev->sent_cmd)
4180 return;
4182 sent = (void *) hdev->sent_cmd->data;
4183 opcode = __le16_to_cpu(sent->opcode);
4184 if (opcode == HCI_OP_RESET)
4185 return;
4187 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4188 if (!skb)
4189 return;
4191 skb_queue_head(&hdev->cmd_q, skb);
4192 queue_work(hdev->workqueue, &hdev->cmd_work);
4195 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4197 hci_req_complete_t req_complete = NULL;
4198 struct sk_buff *skb;
4199 unsigned long flags;
4201 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4203 /* If the completed command doesn't match the last one that was
4204 * sent we need to do special handling of it.
4206 if (!hci_sent_cmd_data(hdev, opcode)) {
4207 /* Some CSR based controllers generate a spontaneous
4208 * reset complete event during init and any pending
4209 * command will never be completed. In such a case we
4210 * need to resend whatever was the last sent
4211 * command.
4213 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4214 hci_resend_last(hdev);
4216 return;
4219 /* If the command succeeded and there's still more commands in
4220 * this request the request is not yet complete.
4222 if (!status && !hci_req_is_complete(hdev))
4223 return;
4225 /* If this was the last command in a request the complete
4226 * callback would be found in hdev->sent_cmd instead of the
4227 * command queue (hdev->cmd_q).
4229 if (hdev->sent_cmd) {
4230 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4232 if (req_complete) {
4233 /* We must set the complete callback to NULL to
4234 * avoid calling the callback more than once if
4235 * this function gets called again.
4237 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4239 goto call_complete;
4243 /* Remove all pending commands belonging to this request */
4244 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4245 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4246 if (bt_cb(skb)->req.start) {
4247 __skb_queue_head(&hdev->cmd_q, skb);
4248 break;
4251 req_complete = bt_cb(skb)->req.complete;
4252 kfree_skb(skb);
4254 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4256 call_complete:
4257 if (req_complete)
4258 req_complete(hdev, status);
4261 static void hci_rx_work(struct work_struct *work)
4263 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4264 struct sk_buff *skb;
4266 BT_DBG("%s", hdev->name);
4268 while ((skb = skb_dequeue(&hdev->rx_q))) {
4269 /* Send copy to monitor */
4270 hci_send_to_monitor(hdev, skb);
4272 if (atomic_read(&hdev->promisc)) {
4273 /* Send copy to the sockets */
4274 hci_send_to_sock(hdev, skb);
4277 if (test_bit(HCI_RAW, &hdev->flags) ||
4278 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4279 kfree_skb(skb);
4280 continue;
4283 if (test_bit(HCI_INIT, &hdev->flags)) {
4284 /* Don't process data packets in this state. */
4285 switch (bt_cb(skb)->pkt_type) {
4286 case HCI_ACLDATA_PKT:
4287 case HCI_SCODATA_PKT:
4288 kfree_skb(skb);
4289 continue;
4293 /* Process frame */
4294 switch (bt_cb(skb)->pkt_type) {
4295 case HCI_EVENT_PKT:
4296 BT_DBG("%s Event packet", hdev->name);
4297 hci_event_packet(hdev, skb);
4298 break;
4300 case HCI_ACLDATA_PKT:
4301 BT_DBG("%s ACL data packet", hdev->name);
4302 hci_acldata_packet(hdev, skb);
4303 break;
4305 case HCI_SCODATA_PKT:
4306 BT_DBG("%s SCO data packet", hdev->name);
4307 hci_scodata_packet(hdev, skb);
4308 break;
4310 default:
4311 kfree_skb(skb);
4312 break;
4317 static void hci_cmd_work(struct work_struct *work)
4319 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4320 struct sk_buff *skb;
4322 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4323 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4325 /* Send queued commands */
4326 if (atomic_read(&hdev->cmd_cnt)) {
4327 skb = skb_dequeue(&hdev->cmd_q);
4328 if (!skb)
4329 return;
4331 kfree_skb(hdev->sent_cmd);
4333 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4334 if (hdev->sent_cmd) {
4335 atomic_dec(&hdev->cmd_cnt);
4336 hci_send_frame(hdev, skb);
4337 if (test_bit(HCI_RESET, &hdev->flags))
4338 del_timer(&hdev->cmd_timer);
4339 else
4340 mod_timer(&hdev->cmd_timer,
4341 jiffies + HCI_CMD_TIMEOUT);
4342 } else {
4343 skb_queue_head(&hdev->cmd_q, skb);
4344 queue_work(hdev->workqueue, &hdev->cmd_work);