iio: adis16480: Fix scale factors
[linux-2.6/btrfs-unstable.git] / net / bluetooth / hci_core.c
blob2f8fb33067e1c48fedf3055ddf29a3be68161c9f
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
59 /* ----- HCI requests ----- */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
68 /* ---- HCI notifications ---- */
70 static void hci_notify(struct hci_dev *hdev, int event)
72 hci_sock_dev_event(hdev, event);
75 /* ---- HCI debugfs entries ---- */
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
83 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
84 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
109 return -EALREADY;
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
123 kfree_skb(skb);
125 hci_dev_change_flag(hdev, HCI_DUT_MODE);
127 return count;
130 static const struct file_operations dut_mode_fops = {
131 .open = simple_open,
132 .read = dut_mode_read,
133 .write = dut_mode_write,
134 .llseek = default_llseek,
137 /* ---- HCI requests ---- */
139 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
140 struct sk_buff *skb)
142 BT_DBG("%s result 0x%2.2x", hdev->name, result);
144 if (hdev->req_status == HCI_REQ_PEND) {
145 hdev->req_result = result;
146 hdev->req_status = HCI_REQ_DONE;
147 if (skb)
148 hdev->req_skb = skb_get(skb);
149 wake_up_interruptible(&hdev->req_wait_q);
153 static void hci_req_cancel(struct hci_dev *hdev, int err)
155 BT_DBG("%s err 0x%2.2x", hdev->name, err);
157 if (hdev->req_status == HCI_REQ_PEND) {
158 hdev->req_result = err;
159 hdev->req_status = HCI_REQ_CANCELED;
160 wake_up_interruptible(&hdev->req_wait_q);
164 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
165 const void *param, u8 event, u32 timeout)
167 DECLARE_WAITQUEUE(wait, current);
168 struct hci_request req;
169 struct sk_buff *skb;
170 int err = 0;
172 BT_DBG("%s", hdev->name);
174 hci_req_init(&req, hdev);
176 hci_req_add_ev(&req, opcode, plen, param, event);
178 hdev->req_status = HCI_REQ_PEND;
180 add_wait_queue(&hdev->req_wait_q, &wait);
181 set_current_state(TASK_INTERRUPTIBLE);
183 err = hci_req_run_skb(&req, hci_req_sync_complete);
184 if (err < 0) {
185 remove_wait_queue(&hdev->req_wait_q, &wait);
186 set_current_state(TASK_RUNNING);
187 return ERR_PTR(err);
190 schedule_timeout(timeout);
192 remove_wait_queue(&hdev->req_wait_q, &wait);
194 if (signal_pending(current))
195 return ERR_PTR(-EINTR);
197 switch (hdev->req_status) {
198 case HCI_REQ_DONE:
199 err = -bt_to_errno(hdev->req_result);
200 break;
202 case HCI_REQ_CANCELED:
203 err = -hdev->req_result;
204 break;
206 default:
207 err = -ETIMEDOUT;
208 break;
211 hdev->req_status = hdev->req_result = 0;
212 skb = hdev->req_skb;
213 hdev->req_skb = NULL;
215 BT_DBG("%s end: err %d", hdev->name, err);
217 if (err < 0) {
218 kfree_skb(skb);
219 return ERR_PTR(err);
222 if (!skb)
223 return ERR_PTR(-ENODATA);
225 return skb;
227 EXPORT_SYMBOL(__hci_cmd_sync_ev);
229 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
230 const void *param, u32 timeout)
232 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
234 EXPORT_SYMBOL(__hci_cmd_sync);
236 /* Execute request and wait for completion. */
237 static int __hci_req_sync(struct hci_dev *hdev,
238 void (*func)(struct hci_request *req,
239 unsigned long opt),
240 unsigned long opt, __u32 timeout)
242 struct hci_request req;
243 DECLARE_WAITQUEUE(wait, current);
244 int err = 0;
246 BT_DBG("%s start", hdev->name);
248 hci_req_init(&req, hdev);
250 hdev->req_status = HCI_REQ_PEND;
252 func(&req, opt);
254 add_wait_queue(&hdev->req_wait_q, &wait);
255 set_current_state(TASK_INTERRUPTIBLE);
257 err = hci_req_run_skb(&req, hci_req_sync_complete);
258 if (err < 0) {
259 hdev->req_status = 0;
261 remove_wait_queue(&hdev->req_wait_q, &wait);
262 set_current_state(TASK_RUNNING);
264 /* ENODATA means the HCI request command queue is empty.
265 * This can happen when a request with conditionals doesn't
266 * trigger any commands to be sent. This is normal behavior
267 * and should not trigger an error return.
269 if (err == -ENODATA)
270 return 0;
272 return err;
275 schedule_timeout(timeout);
277 remove_wait_queue(&hdev->req_wait_q, &wait);
279 if (signal_pending(current))
280 return -EINTR;
282 switch (hdev->req_status) {
283 case HCI_REQ_DONE:
284 err = -bt_to_errno(hdev->req_result);
285 break;
287 case HCI_REQ_CANCELED:
288 err = -hdev->req_result;
289 break;
291 default:
292 err = -ETIMEDOUT;
293 break;
296 hdev->req_status = hdev->req_result = 0;
298 BT_DBG("%s end: err %d", hdev->name, err);
300 return err;
303 static int hci_req_sync(struct hci_dev *hdev,
304 void (*req)(struct hci_request *req,
305 unsigned long opt),
306 unsigned long opt, __u32 timeout)
308 int ret;
310 if (!test_bit(HCI_UP, &hdev->flags))
311 return -ENETDOWN;
313 /* Serialize all requests */
314 hci_req_lock(hdev);
315 ret = __hci_req_sync(hdev, req, opt, timeout);
316 hci_req_unlock(hdev);
318 return ret;
321 static void hci_reset_req(struct hci_request *req, unsigned long opt)
323 BT_DBG("%s %ld", req->hdev->name, opt);
325 /* Reset device */
326 set_bit(HCI_RESET, &req->hdev->flags);
327 hci_req_add(req, HCI_OP_RESET, 0, NULL);
330 static void bredr_init(struct hci_request *req)
332 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
334 /* Read Local Supported Features */
335 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
337 /* Read Local Version */
338 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
340 /* Read BD Address */
341 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
344 static void amp_init1(struct hci_request *req)
346 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
348 /* Read Local Version */
349 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
351 /* Read Local Supported Commands */
352 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
354 /* Read Local AMP Info */
355 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
357 /* Read Data Blk size */
358 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
360 /* Read Flow Control Mode */
361 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
363 /* Read Location Data */
364 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
367 static void amp_init2(struct hci_request *req)
369 /* Read Local Supported Features. Not all AMP controllers
370 * support this so it's placed conditionally in the second
371 * stage init.
373 if (req->hdev->commands[14] & 0x20)
374 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
377 static void hci_init1_req(struct hci_request *req, unsigned long opt)
379 struct hci_dev *hdev = req->hdev;
381 BT_DBG("%s %ld", hdev->name, opt);
383 /* Reset */
384 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
385 hci_reset_req(req, 0);
387 switch (hdev->dev_type) {
388 case HCI_BREDR:
389 bredr_init(req);
390 break;
392 case HCI_AMP:
393 amp_init1(req);
394 break;
396 default:
397 BT_ERR("Unknown device type %d", hdev->dev_type);
398 break;
402 static void bredr_setup(struct hci_request *req)
404 __le16 param;
405 __u8 flt_type;
407 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
408 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
410 /* Read Class of Device */
411 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
413 /* Read Local Name */
414 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
416 /* Read Voice Setting */
417 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
419 /* Read Number of Supported IAC */
420 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
422 /* Read Current IAC LAP */
423 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
425 /* Clear Event Filters */
426 flt_type = HCI_FLT_CLEAR_ALL;
427 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
429 /* Connection accept timeout ~20 secs */
430 param = cpu_to_le16(0x7d00);
431 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
434 static void le_setup(struct hci_request *req)
436 struct hci_dev *hdev = req->hdev;
438 /* Read LE Buffer Size */
439 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
441 /* Read LE Local Supported Features */
442 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
444 /* Read LE Supported States */
445 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
447 /* Read LE White List Size */
448 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
450 /* Clear LE White List */
451 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
453 /* LE-only controllers have LE implicitly enabled */
454 if (!lmp_bredr_capable(hdev))
455 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
458 static void hci_setup_event_mask(struct hci_request *req)
460 struct hci_dev *hdev = req->hdev;
462 /* The second byte is 0xff instead of 0x9f (two reserved bits
463 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
464 * command otherwise.
466 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
468 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
469 * any event mask for pre 1.2 devices.
471 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
472 return;
474 if (lmp_bredr_capable(hdev)) {
475 events[4] |= 0x01; /* Flow Specification Complete */
476 events[4] |= 0x02; /* Inquiry Result with RSSI */
477 events[4] |= 0x04; /* Read Remote Extended Features Complete */
478 events[5] |= 0x08; /* Synchronous Connection Complete */
479 events[5] |= 0x10; /* Synchronous Connection Changed */
480 } else {
481 /* Use a different default for LE-only devices */
482 memset(events, 0, sizeof(events));
483 events[0] |= 0x10; /* Disconnection Complete */
484 events[1] |= 0x08; /* Read Remote Version Information Complete */
485 events[1] |= 0x20; /* Command Complete */
486 events[1] |= 0x40; /* Command Status */
487 events[1] |= 0x80; /* Hardware Error */
488 events[2] |= 0x04; /* Number of Completed Packets */
489 events[3] |= 0x02; /* Data Buffer Overflow */
491 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
492 events[0] |= 0x80; /* Encryption Change */
493 events[5] |= 0x80; /* Encryption Key Refresh Complete */
497 if (lmp_inq_rssi_capable(hdev))
498 events[4] |= 0x02; /* Inquiry Result with RSSI */
500 if (lmp_sniffsubr_capable(hdev))
501 events[5] |= 0x20; /* Sniff Subrating */
503 if (lmp_pause_enc_capable(hdev))
504 events[5] |= 0x80; /* Encryption Key Refresh Complete */
506 if (lmp_ext_inq_capable(hdev))
507 events[5] |= 0x40; /* Extended Inquiry Result */
509 if (lmp_no_flush_capable(hdev))
510 events[7] |= 0x01; /* Enhanced Flush Complete */
512 if (lmp_lsto_capable(hdev))
513 events[6] |= 0x80; /* Link Supervision Timeout Changed */
515 if (lmp_ssp_capable(hdev)) {
516 events[6] |= 0x01; /* IO Capability Request */
517 events[6] |= 0x02; /* IO Capability Response */
518 events[6] |= 0x04; /* User Confirmation Request */
519 events[6] |= 0x08; /* User Passkey Request */
520 events[6] |= 0x10; /* Remote OOB Data Request */
521 events[6] |= 0x20; /* Simple Pairing Complete */
522 events[7] |= 0x04; /* User Passkey Notification */
523 events[7] |= 0x08; /* Keypress Notification */
524 events[7] |= 0x10; /* Remote Host Supported
525 * Features Notification
529 if (lmp_le_capable(hdev))
530 events[7] |= 0x20; /* LE Meta-Event */
532 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
535 static void hci_init2_req(struct hci_request *req, unsigned long opt)
537 struct hci_dev *hdev = req->hdev;
539 if (hdev->dev_type == HCI_AMP)
540 return amp_init2(req);
542 if (lmp_bredr_capable(hdev))
543 bredr_setup(req);
544 else
545 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
547 if (lmp_le_capable(hdev))
548 le_setup(req);
550 /* All Bluetooth 1.2 and later controllers should support the
551 * HCI command for reading the local supported commands.
553 * Unfortunately some controllers indicate Bluetooth 1.2 support,
554 * but do not have support for this command. If that is the case,
555 * the driver can quirk the behavior and skip reading the local
556 * supported commands.
558 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
559 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
560 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
562 if (lmp_ssp_capable(hdev)) {
563 /* When SSP is available, then the host features page
564 * should also be available as well. However some
565 * controllers list the max_page as 0 as long as SSP
566 * has not been enabled. To achieve proper debugging
567 * output, force the minimum max_page to 1 at least.
569 hdev->max_page = 0x01;
571 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
572 u8 mode = 0x01;
574 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
575 sizeof(mode), &mode);
576 } else {
577 struct hci_cp_write_eir cp;
579 memset(hdev->eir, 0, sizeof(hdev->eir));
580 memset(&cp, 0, sizeof(cp));
582 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
586 if (lmp_inq_rssi_capable(hdev) ||
587 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
588 u8 mode;
590 /* If Extended Inquiry Result events are supported, then
591 * they are clearly preferred over Inquiry Result with RSSI
592 * events.
594 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
596 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
599 if (lmp_inq_tx_pwr_capable(hdev))
600 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
602 if (lmp_ext_feat_capable(hdev)) {
603 struct hci_cp_read_local_ext_features cp;
605 cp.page = 0x01;
606 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
607 sizeof(cp), &cp);
610 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
611 u8 enable = 1;
612 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
613 &enable);
617 static void hci_setup_link_policy(struct hci_request *req)
619 struct hci_dev *hdev = req->hdev;
620 struct hci_cp_write_def_link_policy cp;
621 u16 link_policy = 0;
623 if (lmp_rswitch_capable(hdev))
624 link_policy |= HCI_LP_RSWITCH;
625 if (lmp_hold_capable(hdev))
626 link_policy |= HCI_LP_HOLD;
627 if (lmp_sniff_capable(hdev))
628 link_policy |= HCI_LP_SNIFF;
629 if (lmp_park_capable(hdev))
630 link_policy |= HCI_LP_PARK;
632 cp.policy = cpu_to_le16(link_policy);
633 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
636 static void hci_set_le_support(struct hci_request *req)
638 struct hci_dev *hdev = req->hdev;
639 struct hci_cp_write_le_host_supported cp;
641 /* LE-only devices do not support explicit enablement */
642 if (!lmp_bredr_capable(hdev))
643 return;
645 memset(&cp, 0, sizeof(cp));
647 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
648 cp.le = 0x01;
649 cp.simul = 0x00;
652 if (cp.le != lmp_host_le_capable(hdev))
653 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
654 &cp);
657 static void hci_set_event_mask_page_2(struct hci_request *req)
659 struct hci_dev *hdev = req->hdev;
660 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
662 /* If Connectionless Slave Broadcast master role is supported
663 * enable all necessary events for it.
665 if (lmp_csb_master_capable(hdev)) {
666 events[1] |= 0x40; /* Triggered Clock Capture */
667 events[1] |= 0x80; /* Synchronization Train Complete */
668 events[2] |= 0x10; /* Slave Page Response Timeout */
669 events[2] |= 0x20; /* CSB Channel Map Change */
672 /* If Connectionless Slave Broadcast slave role is supported
673 * enable all necessary events for it.
675 if (lmp_csb_slave_capable(hdev)) {
676 events[2] |= 0x01; /* Synchronization Train Received */
677 events[2] |= 0x02; /* CSB Receive */
678 events[2] |= 0x04; /* CSB Timeout */
679 events[2] |= 0x08; /* Truncated Page Complete */
682 /* Enable Authenticated Payload Timeout Expired event if supported */
683 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
684 events[2] |= 0x80;
686 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
689 static void hci_init3_req(struct hci_request *req, unsigned long opt)
691 struct hci_dev *hdev = req->hdev;
692 u8 p;
694 hci_setup_event_mask(req);
696 if (hdev->commands[6] & 0x20) {
697 struct hci_cp_read_stored_link_key cp;
699 bacpy(&cp.bdaddr, BDADDR_ANY);
700 cp.read_all = 0x01;
701 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
704 if (hdev->commands[5] & 0x10)
705 hci_setup_link_policy(req);
707 if (hdev->commands[8] & 0x01)
708 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
710 /* Some older Broadcom based Bluetooth 1.2 controllers do not
711 * support the Read Page Scan Type command. Check support for
712 * this command in the bit mask of supported commands.
714 if (hdev->commands[13] & 0x01)
715 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
717 if (lmp_le_capable(hdev)) {
718 u8 events[8];
720 memset(events, 0, sizeof(events));
721 events[0] = 0x0f;
723 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
724 events[0] |= 0x10; /* LE Long Term Key Request */
726 /* If controller supports the Connection Parameters Request
727 * Link Layer Procedure, enable the corresponding event.
729 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
730 events[0] |= 0x20; /* LE Remote Connection
731 * Parameter Request
734 /* If the controller supports the Data Length Extension
735 * feature, enable the corresponding event.
737 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
738 events[0] |= 0x40; /* LE Data Length Change */
740 /* If the controller supports Extended Scanner Filter
741 * Policies, enable the correspondig event.
743 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
744 events[1] |= 0x04; /* LE Direct Advertising
745 * Report
748 /* If the controller supports the LE Read Local P-256
749 * Public Key command, enable the corresponding event.
751 if (hdev->commands[34] & 0x02)
752 events[0] |= 0x80; /* LE Read Local P-256
753 * Public Key Complete
756 /* If the controller supports the LE Generate DHKey
757 * command, enable the corresponding event.
759 if (hdev->commands[34] & 0x04)
760 events[1] |= 0x01; /* LE Generate DHKey Complete */
762 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
763 events);
765 if (hdev->commands[25] & 0x40) {
766 /* Read LE Advertising Channel TX Power */
767 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
770 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
771 /* Read LE Maximum Data Length */
772 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
774 /* Read LE Suggested Default Data Length */
775 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
778 hci_set_le_support(req);
781 /* Read features beyond page 1 if available */
782 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
783 struct hci_cp_read_local_ext_features cp;
785 cp.page = p;
786 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
787 sizeof(cp), &cp);
791 static void hci_init4_req(struct hci_request *req, unsigned long opt)
793 struct hci_dev *hdev = req->hdev;
795 /* Some Broadcom based Bluetooth controllers do not support the
796 * Delete Stored Link Key command. They are clearly indicating its
797 * absence in the bit mask of supported commands.
799 * Check the supported commands and only if the the command is marked
800 * as supported send it. If not supported assume that the controller
801 * does not have actual support for stored link keys which makes this
802 * command redundant anyway.
804 * Some controllers indicate that they support handling deleting
805 * stored link keys, but they don't. The quirk lets a driver
806 * just disable this command.
808 if (hdev->commands[6] & 0x80 &&
809 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
810 struct hci_cp_delete_stored_link_key cp;
812 bacpy(&cp.bdaddr, BDADDR_ANY);
813 cp.delete_all = 0x01;
814 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
815 sizeof(cp), &cp);
818 /* Set event mask page 2 if the HCI command for it is supported */
819 if (hdev->commands[22] & 0x04)
820 hci_set_event_mask_page_2(req);
822 /* Read local codec list if the HCI command is supported */
823 if (hdev->commands[29] & 0x20)
824 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
826 /* Get MWS transport configuration if the HCI command is supported */
827 if (hdev->commands[30] & 0x08)
828 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
830 /* Check for Synchronization Train support */
831 if (lmp_sync_train_capable(hdev))
832 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
834 /* Enable Secure Connections if supported and configured */
835 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
836 bredr_sc_enabled(hdev)) {
837 u8 support = 0x01;
839 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
840 sizeof(support), &support);
844 static int __hci_init(struct hci_dev *hdev)
846 int err;
848 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
849 if (err < 0)
850 return err;
852 /* The Device Under Test (DUT) mode is special and available for
853 * all controller types. So just create it early on.
855 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
856 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
857 &dut_mode_fops);
860 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
861 if (err < 0)
862 return err;
864 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
865 * BR/EDR/LE type controllers. AMP controllers only need the
866 * first two stages of init.
868 if (hdev->dev_type != HCI_BREDR)
869 return 0;
871 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
872 if (err < 0)
873 return err;
875 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
876 if (err < 0)
877 return err;
879 /* This function is only called when the controller is actually in
880 * configured state. When the controller is marked as unconfigured,
881 * this initialization procedure is not run.
883 * It means that it is possible that a controller runs through its
884 * setup phase and then discovers missing settings. If that is the
885 * case, then this function will not be called. It then will only
886 * be called during the config phase.
888 * So only when in setup phase or config phase, create the debugfs
889 * entries and register the SMP channels.
891 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
892 !hci_dev_test_flag(hdev, HCI_CONFIG))
893 return 0;
895 hci_debugfs_create_common(hdev);
897 if (lmp_bredr_capable(hdev))
898 hci_debugfs_create_bredr(hdev);
900 if (lmp_le_capable(hdev))
901 hci_debugfs_create_le(hdev);
903 return 0;
906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
908 struct hci_dev *hdev = req->hdev;
910 BT_DBG("%s %ld", hdev->name, opt);
912 /* Reset */
913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
914 hci_reset_req(req, 0);
916 /* Read Local Version */
917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
919 /* Read BD Address */
920 if (hdev->set_bdaddr)
921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
924 static int __hci_unconf_init(struct hci_dev *hdev)
926 int err;
928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
929 return 0;
931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
932 if (err < 0)
933 return err;
935 return 0;
938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
940 __u8 scan = opt;
942 BT_DBG("%s %x", req->hdev->name, scan);
944 /* Inquiry and Page scans */
945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
950 __u8 auth = opt;
952 BT_DBG("%s %x", req->hdev->name, auth);
954 /* Authentication */
955 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
960 __u8 encrypt = opt;
962 BT_DBG("%s %x", req->hdev->name, encrypt);
964 /* Encryption */
965 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
970 __le16 policy = cpu_to_le16(opt);
972 BT_DBG("%s %x", req->hdev->name, policy);
974 /* Default link policy */
975 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
978 /* Get HCI device by index.
979 * Device is held on return. */
980 struct hci_dev *hci_dev_get(int index)
982 struct hci_dev *hdev = NULL, *d;
984 BT_DBG("%d", index);
986 if (index < 0)
987 return NULL;
989 read_lock(&hci_dev_list_lock);
990 list_for_each_entry(d, &hci_dev_list, list) {
991 if (d->id == index) {
992 hdev = hci_dev_hold(d);
993 break;
996 read_unlock(&hci_dev_list_lock);
997 return hdev;
1000 /* ---- Inquiry support ---- */
1002 bool hci_discovery_active(struct hci_dev *hdev)
1004 struct discovery_state *discov = &hdev->discovery;
1006 switch (discov->state) {
1007 case DISCOVERY_FINDING:
1008 case DISCOVERY_RESOLVING:
1009 return true;
1011 default:
1012 return false;
1016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1018 int old_state = hdev->discovery.state;
1020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1022 if (old_state == state)
1023 return;
1025 hdev->discovery.state = state;
1027 switch (state) {
1028 case DISCOVERY_STOPPED:
1029 hci_update_background_scan(hdev);
1031 if (old_state != DISCOVERY_STARTING)
1032 mgmt_discovering(hdev, 0);
1033 break;
1034 case DISCOVERY_STARTING:
1035 break;
1036 case DISCOVERY_FINDING:
1037 mgmt_discovering(hdev, 1);
1038 break;
1039 case DISCOVERY_RESOLVING:
1040 break;
1041 case DISCOVERY_STOPPING:
1042 break;
1046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1048 struct discovery_state *cache = &hdev->discovery;
1049 struct inquiry_entry *p, *n;
1051 list_for_each_entry_safe(p, n, &cache->all, all) {
1052 list_del(&p->all);
1053 kfree(p);
1056 INIT_LIST_HEAD(&cache->unknown);
1057 INIT_LIST_HEAD(&cache->resolve);
1060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1061 bdaddr_t *bdaddr)
1063 struct discovery_state *cache = &hdev->discovery;
1064 struct inquiry_entry *e;
1066 BT_DBG("cache %p, %pMR", cache, bdaddr);
1068 list_for_each_entry(e, &cache->all, all) {
1069 if (!bacmp(&e->data.bdaddr, bdaddr))
1070 return e;
1073 return NULL;
1076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1077 bdaddr_t *bdaddr)
1079 struct discovery_state *cache = &hdev->discovery;
1080 struct inquiry_entry *e;
1082 BT_DBG("cache %p, %pMR", cache, bdaddr);
1084 list_for_each_entry(e, &cache->unknown, list) {
1085 if (!bacmp(&e->data.bdaddr, bdaddr))
1086 return e;
1089 return NULL;
1092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1093 bdaddr_t *bdaddr,
1094 int state)
1096 struct discovery_state *cache = &hdev->discovery;
1097 struct inquiry_entry *e;
1099 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1101 list_for_each_entry(e, &cache->resolve, list) {
1102 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1103 return e;
1104 if (!bacmp(&e->data.bdaddr, bdaddr))
1105 return e;
1108 return NULL;
1111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1112 struct inquiry_entry *ie)
1114 struct discovery_state *cache = &hdev->discovery;
1115 struct list_head *pos = &cache->resolve;
1116 struct inquiry_entry *p;
1118 list_del(&ie->list);
1120 list_for_each_entry(p, &cache->resolve, list) {
1121 if (p->name_state != NAME_PENDING &&
1122 abs(p->data.rssi) >= abs(ie->data.rssi))
1123 break;
1124 pos = &p->list;
1127 list_add(&ie->list, pos);
1130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1131 bool name_known)
1133 struct discovery_state *cache = &hdev->discovery;
1134 struct inquiry_entry *ie;
1135 u32 flags = 0;
1137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1139 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1141 if (!data->ssp_mode)
1142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1145 if (ie) {
1146 if (!ie->data.ssp_mode)
1147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1149 if (ie->name_state == NAME_NEEDED &&
1150 data->rssi != ie->data.rssi) {
1151 ie->data.rssi = data->rssi;
1152 hci_inquiry_cache_update_resolve(hdev, ie);
1155 goto update;
1158 /* Entry not in the cache. Add new one. */
1159 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1160 if (!ie) {
1161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1162 goto done;
1165 list_add(&ie->all, &cache->all);
1167 if (name_known) {
1168 ie->name_state = NAME_KNOWN;
1169 } else {
1170 ie->name_state = NAME_NOT_KNOWN;
1171 list_add(&ie->list, &cache->unknown);
1174 update:
1175 if (name_known && ie->name_state != NAME_KNOWN &&
1176 ie->name_state != NAME_PENDING) {
1177 ie->name_state = NAME_KNOWN;
1178 list_del(&ie->list);
1181 memcpy(&ie->data, data, sizeof(*data));
1182 ie->timestamp = jiffies;
1183 cache->timestamp = jiffies;
1185 if (ie->name_state == NAME_NOT_KNOWN)
1186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1188 done:
1189 return flags;
1192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1194 struct discovery_state *cache = &hdev->discovery;
1195 struct inquiry_info *info = (struct inquiry_info *) buf;
1196 struct inquiry_entry *e;
1197 int copied = 0;
1199 list_for_each_entry(e, &cache->all, all) {
1200 struct inquiry_data *data = &e->data;
1202 if (copied >= num)
1203 break;
1205 bacpy(&info->bdaddr, &data->bdaddr);
1206 info->pscan_rep_mode = data->pscan_rep_mode;
1207 info->pscan_period_mode = data->pscan_period_mode;
1208 info->pscan_mode = data->pscan_mode;
1209 memcpy(info->dev_class, data->dev_class, 3);
1210 info->clock_offset = data->clock_offset;
1212 info++;
1213 copied++;
1216 BT_DBG("cache %p, copied %d", cache, copied);
1217 return copied;
1220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1222 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1223 struct hci_dev *hdev = req->hdev;
1224 struct hci_cp_inquiry cp;
1226 BT_DBG("%s", hdev->name);
1228 if (test_bit(HCI_INQUIRY, &hdev->flags))
1229 return;
1231 /* Start Inquiry */
1232 memcpy(&cp.lap, &ir->lap, 3);
1233 cp.length = ir->length;
1234 cp.num_rsp = ir->num_rsp;
1235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1238 int hci_inquiry(void __user *arg)
1240 __u8 __user *ptr = arg;
1241 struct hci_inquiry_req ir;
1242 struct hci_dev *hdev;
1243 int err = 0, do_inquiry = 0, max_rsp;
1244 long timeo;
1245 __u8 *buf;
1247 if (copy_from_user(&ir, ptr, sizeof(ir)))
1248 return -EFAULT;
1250 hdev = hci_dev_get(ir.dev_id);
1251 if (!hdev)
1252 return -ENODEV;
1254 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1255 err = -EBUSY;
1256 goto done;
1259 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1260 err = -EOPNOTSUPP;
1261 goto done;
1264 if (hdev->dev_type != HCI_BREDR) {
1265 err = -EOPNOTSUPP;
1266 goto done;
1269 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1270 err = -EOPNOTSUPP;
1271 goto done;
1274 hci_dev_lock(hdev);
1275 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1276 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1277 hci_inquiry_cache_flush(hdev);
1278 do_inquiry = 1;
1280 hci_dev_unlock(hdev);
1282 timeo = ir.length * msecs_to_jiffies(2000);
1284 if (do_inquiry) {
1285 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1286 timeo);
1287 if (err < 0)
1288 goto done;
1290 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1291 * cleared). If it is interrupted by a signal, return -EINTR.
1293 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1294 TASK_INTERRUPTIBLE))
1295 return -EINTR;
1298 /* for unlimited number of responses we will use buffer with
1299 * 255 entries
1301 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1303 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1304 * copy it to the user space.
1306 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1307 if (!buf) {
1308 err = -ENOMEM;
1309 goto done;
1312 hci_dev_lock(hdev);
1313 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1314 hci_dev_unlock(hdev);
1316 BT_DBG("num_rsp %d", ir.num_rsp);
1318 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1319 ptr += sizeof(ir);
1320 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1321 ir.num_rsp))
1322 err = -EFAULT;
1323 } else
1324 err = -EFAULT;
1326 kfree(buf);
1328 done:
1329 hci_dev_put(hdev);
1330 return err;
1333 static int hci_dev_do_open(struct hci_dev *hdev)
1335 int ret = 0;
1337 BT_DBG("%s %p", hdev->name, hdev);
1339 hci_req_lock(hdev);
1341 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1342 ret = -ENODEV;
1343 goto done;
1346 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1347 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1348 /* Check for rfkill but allow the HCI setup stage to
1349 * proceed (which in itself doesn't cause any RF activity).
1351 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1352 ret = -ERFKILL;
1353 goto done;
1356 /* Check for valid public address or a configured static
1357 * random adddress, but let the HCI setup proceed to
1358 * be able to determine if there is a public address
1359 * or not.
1361 * In case of user channel usage, it is not important
1362 * if a public address or static random address is
1363 * available.
1365 * This check is only valid for BR/EDR controllers
1366 * since AMP controllers do not have an address.
1368 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1369 hdev->dev_type == HCI_BREDR &&
1370 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1371 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1372 ret = -EADDRNOTAVAIL;
1373 goto done;
1377 if (test_bit(HCI_UP, &hdev->flags)) {
1378 ret = -EALREADY;
1379 goto done;
1382 if (hdev->open(hdev)) {
1383 ret = -EIO;
1384 goto done;
1387 atomic_set(&hdev->cmd_cnt, 1);
1388 set_bit(HCI_INIT, &hdev->flags);
1390 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1391 if (hdev->setup)
1392 ret = hdev->setup(hdev);
1394 /* The transport driver can set these quirks before
1395 * creating the HCI device or in its setup callback.
1397 * In case any of them is set, the controller has to
1398 * start up as unconfigured.
1400 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1401 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1402 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1404 /* For an unconfigured controller it is required to
1405 * read at least the version information provided by
1406 * the Read Local Version Information command.
1408 * If the set_bdaddr driver callback is provided, then
1409 * also the original Bluetooth public device address
1410 * will be read using the Read BD Address command.
1412 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1413 ret = __hci_unconf_init(hdev);
1416 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1417 /* If public address change is configured, ensure that
1418 * the address gets programmed. If the driver does not
1419 * support changing the public address, fail the power
1420 * on procedure.
1422 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1423 hdev->set_bdaddr)
1424 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1425 else
1426 ret = -EADDRNOTAVAIL;
1429 if (!ret) {
1430 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1431 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1432 ret = __hci_init(hdev);
1435 clear_bit(HCI_INIT, &hdev->flags);
1437 if (!ret) {
1438 hci_dev_hold(hdev);
1439 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1440 set_bit(HCI_UP, &hdev->flags);
1441 hci_notify(hdev, HCI_DEV_UP);
1442 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1443 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1444 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1445 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1446 hdev->dev_type == HCI_BREDR) {
1447 hci_dev_lock(hdev);
1448 mgmt_powered(hdev, 1);
1449 hci_dev_unlock(hdev);
1451 } else {
1452 /* Init failed, cleanup */
1453 flush_work(&hdev->tx_work);
1454 flush_work(&hdev->cmd_work);
1455 flush_work(&hdev->rx_work);
1457 skb_queue_purge(&hdev->cmd_q);
1458 skb_queue_purge(&hdev->rx_q);
1460 if (hdev->flush)
1461 hdev->flush(hdev);
1463 if (hdev->sent_cmd) {
1464 kfree_skb(hdev->sent_cmd);
1465 hdev->sent_cmd = NULL;
1468 hdev->close(hdev);
1469 hdev->flags &= BIT(HCI_RAW);
1472 done:
1473 hci_req_unlock(hdev);
1474 return ret;
1477 /* ---- HCI ioctl helpers ---- */
1479 int hci_dev_open(__u16 dev)
1481 struct hci_dev *hdev;
1482 int err;
1484 hdev = hci_dev_get(dev);
1485 if (!hdev)
1486 return -ENODEV;
1488 /* Devices that are marked as unconfigured can only be powered
1489 * up as user channel. Trying to bring them up as normal devices
1490 * will result into a failure. Only user channel operation is
1491 * possible.
1493 * When this function is called for a user channel, the flag
1494 * HCI_USER_CHANNEL will be set first before attempting to
1495 * open the device.
1497 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1499 err = -EOPNOTSUPP;
1500 goto done;
1503 /* We need to ensure that no other power on/off work is pending
1504 * before proceeding to call hci_dev_do_open. This is
1505 * particularly important if the setup procedure has not yet
1506 * completed.
1508 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1509 cancel_delayed_work(&hdev->power_off);
1511 /* After this call it is guaranteed that the setup procedure
1512 * has finished. This means that error conditions like RFKILL
1513 * or no valid public or static random address apply.
1515 flush_workqueue(hdev->req_workqueue);
1517 /* For controllers not using the management interface and that
1518 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1519 * so that pairing works for them. Once the management interface
1520 * is in use this bit will be cleared again and userspace has
1521 * to explicitly enable it.
1523 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1524 !hci_dev_test_flag(hdev, HCI_MGMT))
1525 hci_dev_set_flag(hdev, HCI_BONDABLE);
1527 err = hci_dev_do_open(hdev);
1529 done:
1530 hci_dev_put(hdev);
1531 return err;
1534 /* This function requires the caller holds hdev->lock */
1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1537 struct hci_conn_params *p;
1539 list_for_each_entry(p, &hdev->le_conn_params, list) {
1540 if (p->conn) {
1541 hci_conn_drop(p->conn);
1542 hci_conn_put(p->conn);
1543 p->conn = NULL;
1545 list_del_init(&p->action);
1548 BT_DBG("All LE pending actions cleared");
1551 static int hci_dev_do_close(struct hci_dev *hdev)
1553 BT_DBG("%s %p", hdev->name, hdev);
1555 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557 test_bit(HCI_UP, &hdev->flags)) {
1558 /* Execute vendor specific shutdown routine */
1559 if (hdev->shutdown)
1560 hdev->shutdown(hdev);
1563 cancel_delayed_work(&hdev->power_off);
1565 hci_req_cancel(hdev, ENODEV);
1566 hci_req_lock(hdev);
1568 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569 cancel_delayed_work_sync(&hdev->cmd_timer);
1570 hci_req_unlock(hdev);
1571 return 0;
1574 /* Flush RX and TX works */
1575 flush_work(&hdev->tx_work);
1576 flush_work(&hdev->rx_work);
1578 if (hdev->discov_timeout > 0) {
1579 cancel_delayed_work(&hdev->discov_off);
1580 hdev->discov_timeout = 0;
1581 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1585 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586 cancel_delayed_work(&hdev->service_cache);
1588 cancel_delayed_work_sync(&hdev->le_scan_disable);
1589 cancel_delayed_work_sync(&hdev->le_scan_restart);
1591 if (hci_dev_test_flag(hdev, HCI_MGMT))
1592 cancel_delayed_work_sync(&hdev->rpa_expired);
1594 if (hdev->adv_instance_timeout) {
1595 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1596 hdev->adv_instance_timeout = 0;
1599 /* Avoid potential lockdep warnings from the *_flush() calls by
1600 * ensuring the workqueue is empty up front.
1602 drain_workqueue(hdev->workqueue);
1604 hci_dev_lock(hdev);
1606 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1608 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1609 if (hdev->dev_type == HCI_BREDR)
1610 mgmt_powered(hdev, 0);
1613 hci_inquiry_cache_flush(hdev);
1614 hci_pend_le_actions_clear(hdev);
1615 hci_conn_hash_flush(hdev);
1616 hci_dev_unlock(hdev);
1618 smp_unregister(hdev);
1620 hci_notify(hdev, HCI_DEV_DOWN);
1622 if (hdev->flush)
1623 hdev->flush(hdev);
1625 /* Reset device */
1626 skb_queue_purge(&hdev->cmd_q);
1627 atomic_set(&hdev->cmd_cnt, 1);
1628 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1629 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1630 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1631 set_bit(HCI_INIT, &hdev->flags);
1632 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1633 clear_bit(HCI_INIT, &hdev->flags);
1636 /* flush cmd work */
1637 flush_work(&hdev->cmd_work);
1639 /* Drop queues */
1640 skb_queue_purge(&hdev->rx_q);
1641 skb_queue_purge(&hdev->cmd_q);
1642 skb_queue_purge(&hdev->raw_q);
1644 /* Drop last sent command */
1645 if (hdev->sent_cmd) {
1646 cancel_delayed_work_sync(&hdev->cmd_timer);
1647 kfree_skb(hdev->sent_cmd);
1648 hdev->sent_cmd = NULL;
1651 /* After this point our queues are empty
1652 * and no tasks are scheduled. */
1653 hdev->close(hdev);
1655 /* Clear flags */
1656 hdev->flags &= BIT(HCI_RAW);
1657 hci_dev_clear_volatile_flags(hdev);
1659 /* Controller radio is available but is currently powered down */
1660 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1662 memset(hdev->eir, 0, sizeof(hdev->eir));
1663 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1664 bacpy(&hdev->random_addr, BDADDR_ANY);
1666 hci_req_unlock(hdev);
1668 hci_dev_put(hdev);
1669 return 0;
1672 int hci_dev_close(__u16 dev)
1674 struct hci_dev *hdev;
1675 int err;
1677 hdev = hci_dev_get(dev);
1678 if (!hdev)
1679 return -ENODEV;
1681 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1682 err = -EBUSY;
1683 goto done;
1686 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1687 cancel_delayed_work(&hdev->power_off);
1689 err = hci_dev_do_close(hdev);
1691 done:
1692 hci_dev_put(hdev);
1693 return err;
1696 static int hci_dev_do_reset(struct hci_dev *hdev)
1698 int ret;
1700 BT_DBG("%s %p", hdev->name, hdev);
1702 hci_req_lock(hdev);
1704 /* Drop queues */
1705 skb_queue_purge(&hdev->rx_q);
1706 skb_queue_purge(&hdev->cmd_q);
1708 /* Avoid potential lockdep warnings from the *_flush() calls by
1709 * ensuring the workqueue is empty up front.
1711 drain_workqueue(hdev->workqueue);
1713 hci_dev_lock(hdev);
1714 hci_inquiry_cache_flush(hdev);
1715 hci_conn_hash_flush(hdev);
1716 hci_dev_unlock(hdev);
1718 if (hdev->flush)
1719 hdev->flush(hdev);
1721 atomic_set(&hdev->cmd_cnt, 1);
1722 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1724 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1726 hci_req_unlock(hdev);
1727 return ret;
1730 int hci_dev_reset(__u16 dev)
1732 struct hci_dev *hdev;
1733 int err;
1735 hdev = hci_dev_get(dev);
1736 if (!hdev)
1737 return -ENODEV;
1739 if (!test_bit(HCI_UP, &hdev->flags)) {
1740 err = -ENETDOWN;
1741 goto done;
1744 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1745 err = -EBUSY;
1746 goto done;
1749 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1750 err = -EOPNOTSUPP;
1751 goto done;
1754 err = hci_dev_do_reset(hdev);
1756 done:
1757 hci_dev_put(hdev);
1758 return err;
1761 int hci_dev_reset_stat(__u16 dev)
1763 struct hci_dev *hdev;
1764 int ret = 0;
1766 hdev = hci_dev_get(dev);
1767 if (!hdev)
1768 return -ENODEV;
1770 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1771 ret = -EBUSY;
1772 goto done;
1775 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1776 ret = -EOPNOTSUPP;
1777 goto done;
1780 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1782 done:
1783 hci_dev_put(hdev);
1784 return ret;
1787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1789 bool conn_changed, discov_changed;
1791 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1793 if ((scan & SCAN_PAGE))
1794 conn_changed = !hci_dev_test_and_set_flag(hdev,
1795 HCI_CONNECTABLE);
1796 else
1797 conn_changed = hci_dev_test_and_clear_flag(hdev,
1798 HCI_CONNECTABLE);
1800 if ((scan & SCAN_INQUIRY)) {
1801 discov_changed = !hci_dev_test_and_set_flag(hdev,
1802 HCI_DISCOVERABLE);
1803 } else {
1804 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1805 discov_changed = hci_dev_test_and_clear_flag(hdev,
1806 HCI_DISCOVERABLE);
1809 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1810 return;
1812 if (conn_changed || discov_changed) {
1813 /* In case this was disabled through mgmt */
1814 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1816 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1817 mgmt_update_adv_data(hdev);
1819 mgmt_new_settings(hdev);
1823 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1825 struct hci_dev *hdev;
1826 struct hci_dev_req dr;
1827 int err = 0;
1829 if (copy_from_user(&dr, arg, sizeof(dr)))
1830 return -EFAULT;
1832 hdev = hci_dev_get(dr.dev_id);
1833 if (!hdev)
1834 return -ENODEV;
1836 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1837 err = -EBUSY;
1838 goto done;
1841 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1842 err = -EOPNOTSUPP;
1843 goto done;
1846 if (hdev->dev_type != HCI_BREDR) {
1847 err = -EOPNOTSUPP;
1848 goto done;
1851 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1852 err = -EOPNOTSUPP;
1853 goto done;
1856 switch (cmd) {
1857 case HCISETAUTH:
1858 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1859 HCI_INIT_TIMEOUT);
1860 break;
1862 case HCISETENCRYPT:
1863 if (!lmp_encrypt_capable(hdev)) {
1864 err = -EOPNOTSUPP;
1865 break;
1868 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1869 /* Auth must be enabled first */
1870 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871 HCI_INIT_TIMEOUT);
1872 if (err)
1873 break;
1876 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1877 HCI_INIT_TIMEOUT);
1878 break;
1880 case HCISETSCAN:
1881 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1882 HCI_INIT_TIMEOUT);
1884 /* Ensure that the connectable and discoverable states
1885 * get correctly modified as this was a non-mgmt change.
1887 if (!err)
1888 hci_update_scan_state(hdev, dr.dev_opt);
1889 break;
1891 case HCISETLINKPOL:
1892 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1893 HCI_INIT_TIMEOUT);
1894 break;
1896 case HCISETLINKMODE:
1897 hdev->link_mode = ((__u16) dr.dev_opt) &
1898 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1899 break;
1901 case HCISETPTYPE:
1902 hdev->pkt_type = (__u16) dr.dev_opt;
1903 break;
1905 case HCISETACLMTU:
1906 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1907 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1908 break;
1910 case HCISETSCOMTU:
1911 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1912 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1913 break;
1915 default:
1916 err = -EINVAL;
1917 break;
1920 done:
1921 hci_dev_put(hdev);
1922 return err;
1925 int hci_get_dev_list(void __user *arg)
1927 struct hci_dev *hdev;
1928 struct hci_dev_list_req *dl;
1929 struct hci_dev_req *dr;
1930 int n = 0, size, err;
1931 __u16 dev_num;
1933 if (get_user(dev_num, (__u16 __user *) arg))
1934 return -EFAULT;
1936 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1937 return -EINVAL;
1939 size = sizeof(*dl) + dev_num * sizeof(*dr);
1941 dl = kzalloc(size, GFP_KERNEL);
1942 if (!dl)
1943 return -ENOMEM;
1945 dr = dl->dev_req;
1947 read_lock(&hci_dev_list_lock);
1948 list_for_each_entry(hdev, &hci_dev_list, list) {
1949 unsigned long flags = hdev->flags;
1951 /* When the auto-off is configured it means the transport
1952 * is running, but in that case still indicate that the
1953 * device is actually down.
1955 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1956 flags &= ~BIT(HCI_UP);
1958 (dr + n)->dev_id = hdev->id;
1959 (dr + n)->dev_opt = flags;
1961 if (++n >= dev_num)
1962 break;
1964 read_unlock(&hci_dev_list_lock);
1966 dl->dev_num = n;
1967 size = sizeof(*dl) + n * sizeof(*dr);
1969 err = copy_to_user(arg, dl, size);
1970 kfree(dl);
1972 return err ? -EFAULT : 0;
1975 int hci_get_dev_info(void __user *arg)
1977 struct hci_dev *hdev;
1978 struct hci_dev_info di;
1979 unsigned long flags;
1980 int err = 0;
1982 if (copy_from_user(&di, arg, sizeof(di)))
1983 return -EFAULT;
1985 hdev = hci_dev_get(di.dev_id);
1986 if (!hdev)
1987 return -ENODEV;
1989 /* When the auto-off is configured it means the transport
1990 * is running, but in that case still indicate that the
1991 * device is actually down.
1993 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1994 flags = hdev->flags & ~BIT(HCI_UP);
1995 else
1996 flags = hdev->flags;
1998 strcpy(di.name, hdev->name);
1999 di.bdaddr = hdev->bdaddr;
2000 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2001 di.flags = flags;
2002 di.pkt_type = hdev->pkt_type;
2003 if (lmp_bredr_capable(hdev)) {
2004 di.acl_mtu = hdev->acl_mtu;
2005 di.acl_pkts = hdev->acl_pkts;
2006 di.sco_mtu = hdev->sco_mtu;
2007 di.sco_pkts = hdev->sco_pkts;
2008 } else {
2009 di.acl_mtu = hdev->le_mtu;
2010 di.acl_pkts = hdev->le_pkts;
2011 di.sco_mtu = 0;
2012 di.sco_pkts = 0;
2014 di.link_policy = hdev->link_policy;
2015 di.link_mode = hdev->link_mode;
2017 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2018 memcpy(&di.features, &hdev->features, sizeof(di.features));
2020 if (copy_to_user(arg, &di, sizeof(di)))
2021 err = -EFAULT;
2023 hci_dev_put(hdev);
2025 return err;
2028 /* ---- Interface to HCI drivers ---- */
2030 static int hci_rfkill_set_block(void *data, bool blocked)
2032 struct hci_dev *hdev = data;
2034 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2036 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2037 return -EBUSY;
2039 if (blocked) {
2040 hci_dev_set_flag(hdev, HCI_RFKILLED);
2041 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2042 !hci_dev_test_flag(hdev, HCI_CONFIG))
2043 hci_dev_do_close(hdev);
2044 } else {
2045 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2048 return 0;
2051 static const struct rfkill_ops hci_rfkill_ops = {
2052 .set_block = hci_rfkill_set_block,
2055 static void hci_power_on(struct work_struct *work)
2057 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2058 int err;
2060 BT_DBG("%s", hdev->name);
2062 err = hci_dev_do_open(hdev);
2063 if (err < 0) {
2064 hci_dev_lock(hdev);
2065 mgmt_set_powered_failed(hdev, err);
2066 hci_dev_unlock(hdev);
2067 return;
2070 /* During the HCI setup phase, a few error conditions are
2071 * ignored and they need to be checked now. If they are still
2072 * valid, it is important to turn the device back off.
2074 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2075 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2076 (hdev->dev_type == HCI_BREDR &&
2077 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2078 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2079 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2080 hci_dev_do_close(hdev);
2081 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2082 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2083 HCI_AUTO_OFF_TIMEOUT);
2086 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2087 /* For unconfigured devices, set the HCI_RAW flag
2088 * so that userspace can easily identify them.
2090 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2091 set_bit(HCI_RAW, &hdev->flags);
2093 /* For fully configured devices, this will send
2094 * the Index Added event. For unconfigured devices,
2095 * it will send Unconfigued Index Added event.
2097 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2098 * and no event will be send.
2100 mgmt_index_added(hdev);
2101 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2102 /* When the controller is now configured, then it
2103 * is important to clear the HCI_RAW flag.
2105 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2106 clear_bit(HCI_RAW, &hdev->flags);
2108 /* Powering on the controller with HCI_CONFIG set only
2109 * happens with the transition from unconfigured to
2110 * configured. This will send the Index Added event.
2112 mgmt_index_added(hdev);
2116 static void hci_power_off(struct work_struct *work)
2118 struct hci_dev *hdev = container_of(work, struct hci_dev,
2119 power_off.work);
2121 BT_DBG("%s", hdev->name);
2123 hci_dev_do_close(hdev);
2126 static void hci_error_reset(struct work_struct *work)
2128 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2130 BT_DBG("%s", hdev->name);
2132 if (hdev->hw_error)
2133 hdev->hw_error(hdev, hdev->hw_error_code);
2134 else
2135 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2136 hdev->hw_error_code);
2138 if (hci_dev_do_close(hdev))
2139 return;
2141 hci_dev_do_open(hdev);
2144 static void hci_discov_off(struct work_struct *work)
2146 struct hci_dev *hdev;
2148 hdev = container_of(work, struct hci_dev, discov_off.work);
2150 BT_DBG("%s", hdev->name);
2152 mgmt_discoverable_timeout(hdev);
2155 static void hci_adv_timeout_expire(struct work_struct *work)
2157 struct hci_dev *hdev;
2159 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2161 BT_DBG("%s", hdev->name);
2163 mgmt_adv_timeout_expired(hdev);
2166 void hci_uuids_clear(struct hci_dev *hdev)
2168 struct bt_uuid *uuid, *tmp;
2170 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2171 list_del(&uuid->list);
2172 kfree(uuid);
2176 void hci_link_keys_clear(struct hci_dev *hdev)
2178 struct link_key *key;
2180 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2181 list_del_rcu(&key->list);
2182 kfree_rcu(key, rcu);
2186 void hci_smp_ltks_clear(struct hci_dev *hdev)
2188 struct smp_ltk *k;
2190 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2191 list_del_rcu(&k->list);
2192 kfree_rcu(k, rcu);
2196 void hci_smp_irks_clear(struct hci_dev *hdev)
2198 struct smp_irk *k;
2200 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2201 list_del_rcu(&k->list);
2202 kfree_rcu(k, rcu);
2206 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2208 struct link_key *k;
2210 rcu_read_lock();
2211 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2212 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2213 rcu_read_unlock();
2214 return k;
2217 rcu_read_unlock();
2219 return NULL;
2222 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2223 u8 key_type, u8 old_key_type)
2225 /* Legacy key */
2226 if (key_type < 0x03)
2227 return true;
2229 /* Debug keys are insecure so don't store them persistently */
2230 if (key_type == HCI_LK_DEBUG_COMBINATION)
2231 return false;
2233 /* Changed combination key and there's no previous one */
2234 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2235 return false;
2237 /* Security mode 3 case */
2238 if (!conn)
2239 return true;
2241 /* BR/EDR key derived using SC from an LE link */
2242 if (conn->type == LE_LINK)
2243 return true;
2245 /* Both the local and the remote side requested some form of bonding */
2246 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2247 return true;
2249 /* Local side had dedicated bonding as requirement */
2250 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2251 return true;
2253 /* Remote side had dedicated bonding as requirement */
2254 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2255 return true;
2257 /* If none of the above criteria match, then don't store the key
2258 * persistently */
2259 return false;
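/* Summary of the policy above: keys are kept persistently for legacy
 * pairing (key type < 0x03), for keys derived over an LE link using
 * Secure Connections, when there is no connection object (Security
 * Mode 3), when both sides requested some form of bonding, or when
 * either side requested dedicated bonding. Debug combination keys and
 * changed combination keys without a previous key are never kept.
 */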
2262 static u8 ltk_role(u8 type)
2264 if (type == SMP_LTK)
2265 return HCI_ROLE_MASTER;
2267 return HCI_ROLE_SLAVE;
2270 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271 u8 addr_type, u8 role)
2273 struct smp_ltk *k;
2275 rcu_read_lock();
2276 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2278 continue;
2280 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2281 rcu_read_unlock();
2282 return k;
2285 rcu_read_unlock();
2287 return NULL;
2290 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2292 struct smp_irk *irk;
2294 rcu_read_lock();
2295 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2296 if (!bacmp(&irk->rpa, rpa)) {
2297 rcu_read_unlock();
2298 return irk;
2302 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303 if (smp_irk_matches(hdev, irk->val, rpa)) {
2304 bacpy(&irk->rpa, rpa);
2305 rcu_read_unlock();
2306 return irk;
2309 rcu_read_unlock();
2311 return NULL;
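/* The lookup above is two-pass: first a cheap comparison against the
 * RPA cached in each IRK entry, then the cryptographic check via
 * smp_irk_matches(). On a cryptographic match the resolved RPA is
 * cached in the entry so that the next lookup takes the fast path.
 */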
2314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2315 u8 addr_type)
2317 struct smp_irk *irk;
2319 /* Identity Address must be public or static random */
2320 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2321 return NULL;
2323 rcu_read_lock();
2324 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2325 if (addr_type == irk->addr_type &&
2326 bacmp(bdaddr, &irk->bdaddr) == 0) {
2327 rcu_read_unlock();
2328 return irk;
2331 rcu_read_unlock();
2333 return NULL;
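/* The (bdaddr->b[5] & 0xc0) != 0xc0 test above enforces that a random
 * identity address is of the static type, i.e. its two most
 * significant bits are set. For example C3:xx:xx:xx:xx:xx is a valid
 * static random address, while 43:xx:xx:xx:xx:xx is not (bdaddr_t
 * stores the address little-endian, so b[5] holds the most
 * significant byte).
 */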
2336 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2337 bdaddr_t *bdaddr, u8 *val, u8 type,
2338 u8 pin_len, bool *persistent)
2340 struct link_key *key, *old_key;
2341 u8 old_key_type;
2343 old_key = hci_find_link_key(hdev, bdaddr);
2344 if (old_key) {
2345 old_key_type = old_key->type;
2346 key = old_key;
2347 } else {
2348 old_key_type = conn ? conn->key_type : 0xff;
2349 key = kzalloc(sizeof(*key), GFP_KERNEL);
2350 if (!key)
2351 return NULL;
2352 list_add_rcu(&key->list, &hdev->link_keys);
2355 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2357 /* Some buggy controller combinations generate a changed
2358 * combination key for legacy pairing even when there's no
2359 * previous key */
2360 if (type == HCI_LK_CHANGED_COMBINATION &&
2361 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2362 type = HCI_LK_COMBINATION;
2363 if (conn)
2364 conn->key_type = type;
2367 bacpy(&key->bdaddr, bdaddr);
2368 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2369 key->pin_len = pin_len;
2371 if (type == HCI_LK_CHANGED_COMBINATION)
2372 key->type = old_key_type;
2373 else
2374 key->type = type;
2376 if (persistent)
2377 *persistent = hci_persistent_key(hdev, conn, type,
2378 old_key_type);
2380 return key;
2383 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384 u8 addr_type, u8 type, u8 authenticated,
2385 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2387 struct smp_ltk *key, *old_key;
2388 u8 role = ltk_role(type);
2390 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2391 if (old_key)
2392 key = old_key;
2393 else {
2394 key = kzalloc(sizeof(*key), GFP_KERNEL);
2395 if (!key)
2396 return NULL;
2397 list_add_rcu(&key->list, &hdev->long_term_keys);
2400 bacpy(&key->bdaddr, bdaddr);
2401 key->bdaddr_type = addr_type;
2402 memcpy(key->val, tk, sizeof(key->val));
2403 key->authenticated = authenticated;
2404 key->ediv = ediv;
2405 key->rand = rand;
2406 key->enc_size = enc_size;
2407 key->type = type;
2409 return key;
2412 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2413 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2415 struct smp_irk *irk;
2417 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2418 if (!irk) {
2419 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2420 if (!irk)
2421 return NULL;
2423 bacpy(&irk->bdaddr, bdaddr);
2424 irk->addr_type = addr_type;
2426 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2429 memcpy(irk->val, val, 16);
2430 bacpy(&irk->rpa, rpa);
2432 return irk;
2435 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2437 struct link_key *key;
2439 key = hci_find_link_key(hdev, bdaddr);
2440 if (!key)
2441 return -ENOENT;
2443 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2445 list_del_rcu(&key->list);
2446 kfree_rcu(key, rcu);
2448 return 0;
2451 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2453 struct smp_ltk *k;
2454 int removed = 0;
2456 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2457 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2458 continue;
2460 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462 list_del_rcu(&k->list);
2463 kfree_rcu(k, rcu);
2464 removed++;
2467 return removed ? 0 : -ENOENT;
2470 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2472 struct smp_irk *k;
2474 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2475 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2476 continue;
2478 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2480 list_del_rcu(&k->list);
2481 kfree_rcu(k, rcu);
2485 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2487 struct smp_ltk *k;
2488 struct smp_irk *irk;
2489 u8 addr_type;
2491 if (type == BDADDR_BREDR) {
2492 if (hci_find_link_key(hdev, bdaddr))
2493 return true;
2494 return false;
2497 /* Convert to HCI addr type which struct smp_ltk uses */
2498 if (type == BDADDR_LE_PUBLIC)
2499 addr_type = ADDR_LE_DEV_PUBLIC;
2500 else
2501 addr_type = ADDR_LE_DEV_RANDOM;
2503 irk = hci_get_irk(hdev, bdaddr, addr_type);
2504 if (irk) {
2505 bdaddr = &irk->bdaddr;
2506 addr_type = irk->addr_type;
2509 rcu_read_lock();
2510 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2511 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2512 rcu_read_unlock();
2513 return true;
2516 rcu_read_unlock();
2518 return false;
2521 /* HCI command timer function */
2522 static void hci_cmd_timeout(struct work_struct *work)
2524 struct hci_dev *hdev = container_of(work, struct hci_dev,
2525 cmd_timer.work);
2527 if (hdev->sent_cmd) {
2528 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2529 u16 opcode = __le16_to_cpu(sent->opcode);
2531 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2532 } else {
2533 BT_ERR("%s command tx timeout", hdev->name);
2536 atomic_set(&hdev->cmd_cnt, 1);
2537 queue_work(hdev->workqueue, &hdev->cmd_work);
2540 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2541 bdaddr_t *bdaddr, u8 bdaddr_type)
2543 struct oob_data *data;
2545 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2546 if (bacmp(bdaddr, &data->bdaddr) != 0)
2547 continue;
2548 if (data->bdaddr_type != bdaddr_type)
2549 continue;
2550 return data;
2553 return NULL;
2556 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2557 u8 bdaddr_type)
2559 struct oob_data *data;
2561 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2562 if (!data)
2563 return -ENOENT;
2565 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2567 list_del(&data->list);
2568 kfree(data);
2570 return 0;
2573 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2575 struct oob_data *data, *n;
2577 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2578 list_del(&data->list);
2579 kfree(data);
2583 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2584 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2585 u8 *hash256, u8 *rand256)
2587 struct oob_data *data;
2589 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2590 if (!data) {
2591 data = kmalloc(sizeof(*data), GFP_KERNEL);
2592 if (!data)
2593 return -ENOMEM;
2595 bacpy(&data->bdaddr, bdaddr);
2596 data->bdaddr_type = bdaddr_type;
2597 list_add(&data->list, &hdev->remote_oob_data);
2600 if (hash192 && rand192) {
2601 memcpy(data->hash192, hash192, sizeof(data->hash192));
2602 memcpy(data->rand192, rand192, sizeof(data->rand192));
2603 if (hash256 && rand256)
2604 data->present = 0x03;
2605 } else {
2606 memset(data->hash192, 0, sizeof(data->hash192));
2607 memset(data->rand192, 0, sizeof(data->rand192));
2608 if (hash256 && rand256)
2609 data->present = 0x02;
2610 else
2611 data->present = 0x00;
2614 if (hash256 && rand256) {
2615 memcpy(data->hash256, hash256, sizeof(data->hash256));
2616 memcpy(data->rand256, rand256, sizeof(data->rand256));
2617 } else {
2618 memset(data->hash256, 0, sizeof(data->hash256));
2619 memset(data->rand256, 0, sizeof(data->rand256));
2620 if (hash192 && rand192)
2621 data->present = 0x01;
2624 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2626 return 0;
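/* The data->present field built above encodes which OOB values are
 * valid: 0x01 means only the P-192 hash/randomizer pair is present,
 * 0x02 means only the P-256 pair, 0x03 means both, and 0x00 means
 * neither.
 */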
2629 /* This function requires the caller holds hdev->lock */
2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2632 struct adv_info *adv_instance;
2634 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2635 if (adv_instance->instance == instance)
2636 return adv_instance;
2639 return NULL;
2642 /* This function requires the caller holds hdev->lock */
2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2644 struct adv_info *cur_instance;
2646 cur_instance = hci_find_adv_instance(hdev, instance);
2647 if (!cur_instance)
2648 return NULL;
2650 if (cur_instance == list_last_entry(&hdev->adv_instances,
2651 struct adv_info, list))
2652 return list_first_entry(&hdev->adv_instances,
2653 struct adv_info, list);
2654 else
2655 return list_next_entry(cur_instance, list);
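/* hci_get_next_instance() walks the advertising instances as a ring:
 * the entry after the last one wraps around to the first. It returns
 * NULL only when the given instance does not exist at all.
 */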
2658 /* This function requires the caller holds hdev->lock */
2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2661 struct adv_info *adv_instance;
2663 adv_instance = hci_find_adv_instance(hdev, instance);
2664 if (!adv_instance)
2665 return -ENOENT;
2667 BT_DBG("%s removing instance %d", hdev->name, instance);
2669 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2670 cancel_delayed_work(&hdev->adv_instance_expire);
2671 hdev->adv_instance_timeout = 0;
2674 list_del(&adv_instance->list);
2675 kfree(adv_instance);
2677 hdev->adv_instance_cnt--;
2679 return 0;
2682 /* This function requires the caller holds hdev->lock */
2683 void hci_adv_instances_clear(struct hci_dev *hdev)
2685 struct adv_info *adv_instance, *n;
2687 if (hdev->adv_instance_timeout) {
2688 cancel_delayed_work(&hdev->adv_instance_expire);
2689 hdev->adv_instance_timeout = 0;
2692 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2693 list_del(&adv_instance->list);
2694 kfree(adv_instance);
2697 hdev->adv_instance_cnt = 0;
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702 u16 adv_data_len, u8 *adv_data,
2703 u16 scan_rsp_len, u8 *scan_rsp_data,
2704 u16 timeout, u16 duration)
2706 struct adv_info *adv_instance;
2708 adv_instance = hci_find_adv_instance(hdev, instance);
2709 if (adv_instance) {
2710 memset(adv_instance->adv_data, 0,
2711 sizeof(adv_instance->adv_data));
2712 memset(adv_instance->scan_rsp_data, 0,
2713 sizeof(adv_instance->scan_rsp_data));
2714 } else {
2715 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717 return -EOVERFLOW;
2719 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720 if (!adv_instance)
2721 return -ENOMEM;
2723 adv_instance->pending = true;
2724 adv_instance->instance = instance;
2725 list_add(&adv_instance->list, &hdev->adv_instances);
2726 hdev->adv_instance_cnt++;
2729 adv_instance->flags = flags;
2730 adv_instance->adv_data_len = adv_data_len;
2731 adv_instance->scan_rsp_len = scan_rsp_len;
2733 if (adv_data_len)
2734 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2736 if (scan_rsp_len)
2737 memcpy(adv_instance->scan_rsp_data,
2738 scan_rsp_data, scan_rsp_len);
2740 adv_instance->timeout = timeout;
2741 adv_instance->remaining_time = timeout;
2743 if (duration == 0)
2744 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2745 else
2746 adv_instance->duration = duration;
2748 BT_DBG("%s for instance %d", hdev->name, instance);
2750 return 0;
2753 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2754 bdaddr_t *bdaddr, u8 type)
2756 struct bdaddr_list *b;
2758 list_for_each_entry(b, bdaddr_list, list) {
2759 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2760 return b;
2763 return NULL;
2766 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2768 struct list_head *p, *n;
2770 list_for_each_safe(p, n, bdaddr_list) {
2771 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2773 list_del(p);
2774 kfree(b);
2778 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2780 struct bdaddr_list *entry;
2782 if (!bacmp(bdaddr, BDADDR_ANY))
2783 return -EBADF;
2785 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2786 return -EEXIST;
2788 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2789 if (!entry)
2790 return -ENOMEM;
2792 bacpy(&entry->bdaddr, bdaddr);
2793 entry->bdaddr_type = type;
2795 list_add(&entry->list, list);
2797 return 0;
2800 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2802 struct bdaddr_list *entry;
2804 if (!bacmp(bdaddr, BDADDR_ANY)) {
2805 hci_bdaddr_list_clear(list);
2806 return 0;
2809 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2810 if (!entry)
2811 return -ENOENT;
2813 list_del(&entry->list);
2814 kfree(entry);
2816 return 0;
2819 /* This function requires the caller holds hdev->lock */
2820 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2821 bdaddr_t *addr, u8 addr_type)
2823 struct hci_conn_params *params;
2825 /* The conn params list only contains identity addresses */
2826 if (!hci_is_identity_address(addr, addr_type))
2827 return NULL;
2829 list_for_each_entry(params, &hdev->le_conn_params, list) {
2830 if (bacmp(&params->addr, addr) == 0 &&
2831 params->addr_type == addr_type) {
2832 return params;
2836 return NULL;
2839 /* This function requires the caller holds hdev->lock */
2840 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2841 bdaddr_t *addr, u8 addr_type)
2843 struct hci_conn_params *param;
2845 /* The list only contains identity addresses */
2846 if (!hci_is_identity_address(addr, addr_type))
2847 return NULL;
2849 list_for_each_entry(param, list, action) {
2850 if (bacmp(&param->addr, addr) == 0 &&
2851 param->addr_type == addr_type)
2852 return param;
2855 return NULL;
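/* Note that the pending-action lists (hdev->pend_le_conns and
 * hdev->pend_le_reports) link hci_conn_params entries through their
 * action member, not through the list member used by
 * hdev->le_conn_params, so the same entry can sit on both a pending
 * list and the main connection parameter list.
 */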
2858 /* This function requires the caller holds hdev->lock */
2859 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2860 bdaddr_t *addr, u8 addr_type)
2862 struct hci_conn_params *params;
2864 if (!hci_is_identity_address(addr, addr_type))
2865 return NULL;
2867 params = hci_conn_params_lookup(hdev, addr, addr_type);
2868 if (params)
2869 return params;
2871 params = kzalloc(sizeof(*params), GFP_KERNEL);
2872 if (!params) {
2873 BT_ERR("Out of memory");
2874 return NULL;
2877 bacpy(&params->addr, addr);
2878 params->addr_type = addr_type;
2880 list_add(&params->list, &hdev->le_conn_params);
2881 INIT_LIST_HEAD(&params->action);
2883 params->conn_min_interval = hdev->le_conn_min_interval;
2884 params->conn_max_interval = hdev->le_conn_max_interval;
2885 params->conn_latency = hdev->le_conn_latency;
2886 params->supervision_timeout = hdev->le_supv_timeout;
2887 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2889 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2891 return params;
2894 static void hci_conn_params_free(struct hci_conn_params *params)
2896 if (params->conn) {
2897 hci_conn_drop(params->conn);
2898 hci_conn_put(params->conn);
2901 list_del(&params->action);
2902 list_del(&params->list);
2903 kfree(params);
2906 /* This function requires the caller holds hdev->lock */
2907 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2909 struct hci_conn_params *params;
2911 params = hci_conn_params_lookup(hdev, addr, addr_type);
2912 if (!params)
2913 return;
2915 hci_conn_params_free(params);
2917 hci_update_background_scan(hdev);
2919 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2922 /* This function requires the caller holds hdev->lock */
2923 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2925 struct hci_conn_params *params, *tmp;
2927 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2928 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2929 continue;
2930 list_del(&params->list);
2931 kfree(params);
2934 BT_DBG("All LE disabled connection parameters were removed");
2937 /* This function requires the caller holds hdev->lock */
2938 void hci_conn_params_clear_all(struct hci_dev *hdev)
2940 struct hci_conn_params *params, *tmp;
2942 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2943 hci_conn_params_free(params);
2945 hci_update_background_scan(hdev);
2947 BT_DBG("All LE connection parameters were removed");
2950 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2952 if (status) {
2953 BT_ERR("Failed to start inquiry: status %d", status);
2955 hci_dev_lock(hdev);
2956 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2957 hci_dev_unlock(hdev);
2958 return;
2962 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2963 u16 opcode)
2965 /* General inquiry access code (GIAC) */
2966 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2967 struct hci_cp_inquiry cp;
2968 int err;
2970 if (status) {
2971 BT_ERR("Failed to disable LE scanning: status %d", status);
2972 return;
2975 hdev->discovery.scan_start = 0;
2977 switch (hdev->discovery.type) {
2978 case DISCOV_TYPE_LE:
2979 hci_dev_lock(hdev);
2980 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2981 hci_dev_unlock(hdev);
2982 break;
2984 case DISCOV_TYPE_INTERLEAVED:
2985 hci_dev_lock(hdev);
2987 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2988 &hdev->quirks)) {
2989 /* If we were running an LE-only scan, change the discovery
2990 * state. If we were running both LE and BR/EDR inquiry
2991 * simultaneously, and BR/EDR inquiry is already
2992 * finished, stop discovery, otherwise BR/EDR inquiry
2993 * will stop discovery when finished. If we are going to
2994 * resolve the remote device name, do not change the discovery state.
2996 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2997 hdev->discovery.state != DISCOVERY_RESOLVING)
2998 hci_discovery_set_state(hdev,
2999 DISCOVERY_STOPPED);
3000 } else {
3001 struct hci_request req;
3003 hci_inquiry_cache_flush(hdev);
3005 hci_req_init(&req, hdev);
3007 memset(&cp, 0, sizeof(cp));
3008 memcpy(&cp.lap, lap, sizeof(cp.lap));
3009 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3010 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3012 err = hci_req_run(&req, inquiry_complete);
3013 if (err) {
3014 BT_ERR("Inquiry request failed: err %d", err);
3015 hci_discovery_set_state(hdev,
3016 DISCOVERY_STOPPED);
3020 hci_dev_unlock(hdev);
3021 break;
3025 static void le_scan_disable_work(struct work_struct *work)
3027 struct hci_dev *hdev = container_of(work, struct hci_dev,
3028 le_scan_disable.work);
3029 struct hci_request req;
3030 int err;
3032 BT_DBG("%s", hdev->name);
3034 cancel_delayed_work_sync(&hdev->le_scan_restart);
3036 hci_req_init(&req, hdev);
3038 hci_req_add_le_scan_disable(&req);
3040 err = hci_req_run(&req, le_scan_disable_work_complete);
3041 if (err)
3042 BT_ERR("Disable LE scanning request failed: err %d", err);
3045 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3046 u16 opcode)
3048 unsigned long timeout, duration, scan_start, now;
3050 BT_DBG("%s", hdev->name);
3052 if (status) {
3053 BT_ERR("Failed to restart LE scan: status %d", status);
3054 return;
3057 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3058 !hdev->discovery.scan_start)
3059 return;
3061 /* When the scan was started, hdev->le_scan_disable has been queued
3062 * after duration from scan_start. During scan restart this job
3063 * has been canceled, and we need to queue it again with the proper
3064 * timeout to make sure that the scan does not run indefinitely.
3066 duration = hdev->discovery.scan_duration;
3067 scan_start = hdev->discovery.scan_start;
3068 now = jiffies;
3069 if (now - scan_start <= duration) {
3070 int elapsed;
3072 if (now >= scan_start)
3073 elapsed = now - scan_start;
3074 else
3075 elapsed = ULONG_MAX - scan_start + now;
3077 timeout = duration - elapsed;
3078 } else {
3079 timeout = 0;
3081 queue_delayed_work(hdev->workqueue,
3082 &hdev->le_scan_disable, timeout);
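/* Worked example for the rescheduling above: with a scan duration of
 * 10 * HZ jiffies and 3 * HZ jiffies elapsed since scan_start, the
 * le_scan_disable work is requeued 7 * HZ jiffies from now; if the
 * duration has already passed, timeout is 0 and the work runs
 * immediately. The ULONG_MAX arithmetic handles a jiffies wraparound
 * between scan_start and now.
 */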
3085 static void le_scan_restart_work(struct work_struct *work)
3087 struct hci_dev *hdev = container_of(work, struct hci_dev,
3088 le_scan_restart.work);
3089 struct hci_request req;
3090 struct hci_cp_le_set_scan_enable cp;
3091 int err;
3093 BT_DBG("%s", hdev->name);
3095 /* If controller is not scanning we are done. */
3096 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3097 return;
3099 hci_req_init(&req, hdev);
3101 hci_req_add_le_scan_disable(&req);
3103 memset(&cp, 0, sizeof(cp));
3104 cp.enable = LE_SCAN_ENABLE;
3105 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3106 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3108 err = hci_req_run(&req, le_scan_restart_work_complete);
3109 if (err)
3110 BT_ERR("Restart LE scan request failed: err %d", err);
3113 /* Copy the Identity Address of the controller.
3115 * If the controller has a public BD_ADDR, then by default use that one.
3116 * If this is an LE-only controller without a public address, default to
3117 * the static random address.
3119 * For debugging purposes it is possible to force controllers with a
3120 * public address to use the static random address instead.
3122 * In case BR/EDR has been disabled on a dual-mode controller and
3123 * userspace has configured a static address, then that address
3124 * becomes the identity address instead of the public BR/EDR address.
3126 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3127 u8 *bdaddr_type)
3129 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3130 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3131 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3132 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3133 bacpy(bdaddr, &hdev->static_addr);
3134 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3135 } else {
3136 bacpy(bdaddr, &hdev->bdaddr);
3137 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3141 /* Alloc HCI device */
3142 struct hci_dev *hci_alloc_dev(void)
3144 struct hci_dev *hdev;
3146 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3147 if (!hdev)
3148 return NULL;
3150 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3151 hdev->esco_type = (ESCO_HV1);
3152 hdev->link_mode = (HCI_LM_ACCEPT);
3153 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3154 hdev->io_capability = 0x03; /* No Input No Output */
3155 hdev->manufacturer = 0xffff; /* Default to internal use */
3156 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3157 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3158 hdev->adv_instance_cnt = 0;
3159 hdev->cur_adv_instance = 0x00;
3160 hdev->adv_instance_timeout = 0;
3162 hdev->sniff_max_interval = 800;
3163 hdev->sniff_min_interval = 80;
3165 hdev->le_adv_channel_map = 0x07;
3166 hdev->le_adv_min_interval = 0x0800;
3167 hdev->le_adv_max_interval = 0x0800;
3168 hdev->le_scan_interval = 0x0060;
3169 hdev->le_scan_window = 0x0030;
3170 hdev->le_conn_min_interval = 0x0028;
3171 hdev->le_conn_max_interval = 0x0038;
3172 hdev->le_conn_latency = 0x0000;
3173 hdev->le_supv_timeout = 0x002a;
3174 hdev->le_def_tx_len = 0x001b;
3175 hdev->le_def_tx_time = 0x0148;
3176 hdev->le_max_tx_len = 0x001b;
3177 hdev->le_max_tx_time = 0x0148;
3178 hdev->le_max_rx_len = 0x001b;
3179 hdev->le_max_rx_time = 0x0148;
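/* The LE defaults above are in Bluetooth spec units: advertising and
 * scan intervals are multiples of 0.625 ms (0x0800 = 1.28 s,
 * 0x0060 = 60 ms, 0x0030 = 30 ms), connection intervals are multiples
 * of 1.25 ms (0x0028 = 50 ms, 0x0038 = 70 ms), the supervision
 * timeout is in 10 ms units (0x002a = 420 ms), and the data length
 * defaults are 27 bytes / 328 us for a single LE packet.
 */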
3181 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3182 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3183 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3184 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3186 mutex_init(&hdev->lock);
3187 mutex_init(&hdev->req_lock);
3189 INIT_LIST_HEAD(&hdev->mgmt_pending);
3190 INIT_LIST_HEAD(&hdev->blacklist);
3191 INIT_LIST_HEAD(&hdev->whitelist);
3192 INIT_LIST_HEAD(&hdev->uuids);
3193 INIT_LIST_HEAD(&hdev->link_keys);
3194 INIT_LIST_HEAD(&hdev->long_term_keys);
3195 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3196 INIT_LIST_HEAD(&hdev->remote_oob_data);
3197 INIT_LIST_HEAD(&hdev->le_white_list);
3198 INIT_LIST_HEAD(&hdev->le_conn_params);
3199 INIT_LIST_HEAD(&hdev->pend_le_conns);
3200 INIT_LIST_HEAD(&hdev->pend_le_reports);
3201 INIT_LIST_HEAD(&hdev->conn_hash.list);
3202 INIT_LIST_HEAD(&hdev->adv_instances);
3204 INIT_WORK(&hdev->rx_work, hci_rx_work);
3205 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3206 INIT_WORK(&hdev->tx_work, hci_tx_work);
3207 INIT_WORK(&hdev->power_on, hci_power_on);
3208 INIT_WORK(&hdev->error_reset, hci_error_reset);
3210 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3211 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3212 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3213 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3214 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3216 skb_queue_head_init(&hdev->rx_q);
3217 skb_queue_head_init(&hdev->cmd_q);
3218 skb_queue_head_init(&hdev->raw_q);
3220 init_waitqueue_head(&hdev->req_wait_q);
3222 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3224 hci_init_sysfs(hdev);
3225 discovery_init(hdev);
3227 return hdev;
3229 EXPORT_SYMBOL(hci_alloc_dev);
3231 /* Free HCI device */
3232 void hci_free_dev(struct hci_dev *hdev)
3234 /* will free via device release */
3235 put_device(&hdev->dev);
3237 EXPORT_SYMBOL(hci_free_dev);
3239 /* Register HCI device */
3240 int hci_register_dev(struct hci_dev *hdev)
3242 int id, error;
3244 if (!hdev->open || !hdev->close || !hdev->send)
3245 return -EINVAL;
3247 /* Do not allow HCI_AMP devices to register at index 0,
3248 * so the index can be used as the AMP controller ID.
3250 switch (hdev->dev_type) {
3251 case HCI_BREDR:
3252 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3253 break;
3254 case HCI_AMP:
3255 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3256 break;
3257 default:
3258 return -EINVAL;
3261 if (id < 0)
3262 return id;
3264 sprintf(hdev->name, "hci%d", id);
3265 hdev->id = id;
3267 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3269 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3270 WQ_MEM_RECLAIM, 1, hdev->name);
3271 if (!hdev->workqueue) {
3272 error = -ENOMEM;
3273 goto err;
3276 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3277 WQ_MEM_RECLAIM, 1, hdev->name);
3278 if (!hdev->req_workqueue) {
3279 destroy_workqueue(hdev->workqueue);
3280 error = -ENOMEM;
3281 goto err;
3284 if (!IS_ERR_OR_NULL(bt_debugfs))
3285 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3287 dev_set_name(&hdev->dev, "%s", hdev->name);
3289 error = device_add(&hdev->dev);
3290 if (error < 0)
3291 goto err_wqueue;
3293 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3294 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3295 hdev);
3296 if (hdev->rfkill) {
3297 if (rfkill_register(hdev->rfkill) < 0) {
3298 rfkill_destroy(hdev->rfkill);
3299 hdev->rfkill = NULL;
3303 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3304 hci_dev_set_flag(hdev, HCI_RFKILLED);
3306 hci_dev_set_flag(hdev, HCI_SETUP);
3307 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3309 if (hdev->dev_type == HCI_BREDR) {
3310 /* Assume BR/EDR support until proven otherwise (such as
3311 * through reading supported features during init).
3313 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3316 write_lock(&hci_dev_list_lock);
3317 list_add(&hdev->list, &hci_dev_list);
3318 write_unlock(&hci_dev_list_lock);
3320 /* Devices that are marked for raw-only usage are unconfigured
3321 * and should not be included in normal operation.
3323 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3324 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3326 hci_notify(hdev, HCI_DEV_REG);
3327 hci_dev_hold(hdev);
3329 queue_work(hdev->req_workqueue, &hdev->power_on);
3331 return id;
3333 err_wqueue:
3334 destroy_workqueue(hdev->workqueue);
3335 destroy_workqueue(hdev->req_workqueue);
3336 err:
3337 ida_simple_remove(&hci_index_ida, hdev->id);
3339 return error;
3341 EXPORT_SYMBOL(hci_register_dev);
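/* Hypothetical sketch (not part of this file): the minimal sequence a
 * transport driver follows to register a controller with the core.
 * The example_* names are placeholders for illustration; real drivers
 * typically also provide hdev->flush and a setup callback and fill in
 * hdev->bus according to the actual transport.
 */
#if 0
static int example_open(struct hci_dev *hdev)  { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Hand the packet to the underlying transport here. */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif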
3343 /* Unregister HCI device */
3344 void hci_unregister_dev(struct hci_dev *hdev)
3346 int id;
3348 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3350 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3352 id = hdev->id;
3354 write_lock(&hci_dev_list_lock);
3355 list_del(&hdev->list);
3356 write_unlock(&hci_dev_list_lock);
3358 hci_dev_do_close(hdev);
3360 cancel_work_sync(&hdev->power_on);
3362 if (!test_bit(HCI_INIT, &hdev->flags) &&
3363 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3364 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3365 hci_dev_lock(hdev);
3366 mgmt_index_removed(hdev);
3367 hci_dev_unlock(hdev);
3370 /* mgmt_index_removed should take care of emptying the
3371 * pending list */
3372 BUG_ON(!list_empty(&hdev->mgmt_pending));
3374 hci_notify(hdev, HCI_DEV_UNREG);
3376 if (hdev->rfkill) {
3377 rfkill_unregister(hdev->rfkill);
3378 rfkill_destroy(hdev->rfkill);
3381 device_del(&hdev->dev);
3383 debugfs_remove_recursive(hdev->debugfs);
3385 destroy_workqueue(hdev->workqueue);
3386 destroy_workqueue(hdev->req_workqueue);
3388 hci_dev_lock(hdev);
3389 hci_bdaddr_list_clear(&hdev->blacklist);
3390 hci_bdaddr_list_clear(&hdev->whitelist);
3391 hci_uuids_clear(hdev);
3392 hci_link_keys_clear(hdev);
3393 hci_smp_ltks_clear(hdev);
3394 hci_smp_irks_clear(hdev);
3395 hci_remote_oob_data_clear(hdev);
3396 hci_adv_instances_clear(hdev);
3397 hci_bdaddr_list_clear(&hdev->le_white_list);
3398 hci_conn_params_clear_all(hdev);
3399 hci_discovery_filter_clear(hdev);
3400 hci_dev_unlock(hdev);
3402 hci_dev_put(hdev);
3404 ida_simple_remove(&hci_index_ida, id);
3406 EXPORT_SYMBOL(hci_unregister_dev);
3408 /* Suspend HCI device */
3409 int hci_suspend_dev(struct hci_dev *hdev)
3411 hci_notify(hdev, HCI_DEV_SUSPEND);
3412 return 0;
3414 EXPORT_SYMBOL(hci_suspend_dev);
3416 /* Resume HCI device */
3417 int hci_resume_dev(struct hci_dev *hdev)
3419 hci_notify(hdev, HCI_DEV_RESUME);
3420 return 0;
3422 EXPORT_SYMBOL(hci_resume_dev);
3424 /* Reset HCI device */
3425 int hci_reset_dev(struct hci_dev *hdev)
3427 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3428 struct sk_buff *skb;
3430 skb = bt_skb_alloc(3, GFP_ATOMIC);
3431 if (!skb)
3432 return -ENOMEM;
3434 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3435 memcpy(skb_put(skb, 3), hw_err, 3);
3437 /* Send Hardware Error to upper stack */
3438 return hci_recv_frame(hdev, skb);
3440 EXPORT_SYMBOL(hci_reset_dev);
3442 /* Receive frame from HCI drivers */
3443 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3445 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3446 && !test_bit(HCI_INIT, &hdev->flags))) {
3447 kfree_skb(skb);
3448 return -ENXIO;
3451 /* Incoming skb */
3452 bt_cb(skb)->incoming = 1;
3454 /* Time stamp */
3455 __net_timestamp(skb);
3457 skb_queue_tail(&hdev->rx_q, skb);
3458 queue_work(hdev->workqueue, &hdev->rx_work);
3460 return 0;
3462 EXPORT_SYMBOL(hci_recv_frame);
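/* Hypothetical sketch (not part of this file): how a driver's receive
 * path typically feeds a complete packet into hci_recv_frame(). The
 * example_drv_receive() name and the data/len/pkt_type parameters are
 * assumptions for illustration only.
 */
#if 0
static int example_drv_receive(struct hci_dev *hdev, const void *data,
			       size_t len, u8 pkt_type)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = pkt_type;
	memcpy(skb_put(skb, len), data, len);

	/* Queues the skb on hdev->rx_q and schedules hci_rx_work(). */
	return hci_recv_frame(hdev, skb);
}
#endif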
3464 /* ---- Interface to upper protocols ---- */
3466 int hci_register_cb(struct hci_cb *cb)
3468 BT_DBG("%p name %s", cb, cb->name);
3470 mutex_lock(&hci_cb_list_lock);
3471 list_add_tail(&cb->list, &hci_cb_list);
3472 mutex_unlock(&hci_cb_list_lock);
3474 return 0;
3476 EXPORT_SYMBOL(hci_register_cb);
3478 int hci_unregister_cb(struct hci_cb *cb)
3480 BT_DBG("%p name %s", cb, cb->name);
3482 mutex_lock(&hci_cb_list_lock);
3483 list_del(&cb->list);
3484 mutex_unlock(&hci_cb_list_lock);
3486 return 0;
3488 EXPORT_SYMBOL(hci_unregister_cb);
3490 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3492 int err;
3494 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3496 /* Time stamp */
3497 __net_timestamp(skb);
3499 /* Send copy to monitor */
3500 hci_send_to_monitor(hdev, skb);
3502 if (atomic_read(&hdev->promisc)) {
3503 /* Send copy to the sockets */
3504 hci_send_to_sock(hdev, skb);
3507 /* Get rid of skb owner, prior to sending to the driver. */
3508 skb_orphan(skb);
3510 err = hdev->send(hdev, skb);
3511 if (err < 0) {
3512 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3513 kfree_skb(skb);
3517 /* Send HCI command */
3518 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3519 const void *param)
3521 struct sk_buff *skb;
3523 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3525 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3526 if (!skb) {
3527 BT_ERR("%s no memory for command", hdev->name);
3528 return -ENOMEM;
3531 /* Stand-alone HCI commands must be flagged as
3532 * single-command requests.
3534 bt_cb(skb)->req.start = true;
3536 skb_queue_tail(&hdev->cmd_q, skb);
3537 queue_work(hdev->workqueue, &hdev->cmd_work);
3539 return 0;
3542 /* Get data from the previously sent command */
3543 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3545 struct hci_command_hdr *hdr;
3547 if (!hdev->sent_cmd)
3548 return NULL;
3550 hdr = (void *) hdev->sent_cmd->data;
3552 if (hdr->opcode != cpu_to_le16(opcode))
3553 return NULL;
3555 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3557 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3560 /* Send ACL data */
3561 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3563 struct hci_acl_hdr *hdr;
3564 int len = skb->len;
3566 skb_push(skb, HCI_ACL_HDR_SIZE);
3567 skb_reset_transport_header(skb);
3568 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3569 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3570 hdr->dlen = cpu_to_le16(len);
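/* The handle field written above packs the 12-bit connection handle
 * in bits 0-11 and the ACL packet boundary and broadcast flags in
 * bits 12-15; hci_handle() and hci_flags() in hci_acldata_packet()
 * undo the packing on the receive side.
 */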
3573 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3574 struct sk_buff *skb, __u16 flags)
3576 struct hci_conn *conn = chan->conn;
3577 struct hci_dev *hdev = conn->hdev;
3578 struct sk_buff *list;
3580 skb->len = skb_headlen(skb);
3581 skb->data_len = 0;
3583 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3585 switch (hdev->dev_type) {
3586 case HCI_BREDR:
3587 hci_add_acl_hdr(skb, conn->handle, flags);
3588 break;
3589 case HCI_AMP:
3590 hci_add_acl_hdr(skb, chan->handle, flags);
3591 break;
3592 default:
3593 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3594 return;
3597 list = skb_shinfo(skb)->frag_list;
3598 if (!list) {
3599 /* Non-fragmented */
3600 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3602 skb_queue_tail(queue, skb);
3603 } else {
3604 /* Fragmented */
3605 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3607 skb_shinfo(skb)->frag_list = NULL;
3609 /* Queue all fragments atomically. We need to use spin_lock_bh
3610 * here because of 6LoWPAN links, as there this function is
3611 * called from softirq and using normal spin lock could cause
3612 * deadlocks.
3614 spin_lock_bh(&queue->lock);
3616 __skb_queue_tail(queue, skb);
3618 flags &= ~ACL_START;
3619 flags |= ACL_CONT;
3620 do {
3621 skb = list; list = list->next;
3623 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3624 hci_add_acl_hdr(skb, conn->handle, flags);
3626 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3628 __skb_queue_tail(queue, skb);
3629 } while (list);
3631 spin_unlock_bh(&queue->lock);
3635 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3637 struct hci_dev *hdev = chan->conn->hdev;
3639 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3641 hci_queue_acl(chan, &chan->data_q, skb, flags);
3643 queue_work(hdev->workqueue, &hdev->tx_work);
3646 /* Send SCO data */
3647 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3649 struct hci_dev *hdev = conn->hdev;
3650 struct hci_sco_hdr hdr;
3652 BT_DBG("%s len %d", hdev->name, skb->len);
3654 hdr.handle = cpu_to_le16(conn->handle);
3655 hdr.dlen = skb->len;
3657 skb_push(skb, HCI_SCO_HDR_SIZE);
3658 skb_reset_transport_header(skb);
3659 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3661 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3663 skb_queue_tail(&conn->data_q, skb);
3664 queue_work(hdev->workqueue, &hdev->tx_work);
3667 /* ---- HCI TX task (outgoing data) ---- */
3669 /* HCI Connection scheduler */
3670 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3671 int *quote)
3673 struct hci_conn_hash *h = &hdev->conn_hash;
3674 struct hci_conn *conn = NULL, *c;
3675 unsigned int num = 0, min = ~0;
3677 /* We don't have to lock device here. Connections are always
3678 * added and removed with TX task disabled. */
3680 rcu_read_lock();
3682 list_for_each_entry_rcu(c, &h->list, list) {
3683 if (c->type != type || skb_queue_empty(&c->data_q))
3684 continue;
3686 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3687 continue;
3689 num++;
3691 if (c->sent < min) {
3692 min = c->sent;
3693 conn = c;
3696 if (hci_conn_num(hdev, type) == num)
3697 break;
3700 rcu_read_unlock();
3702 if (conn) {
3703 int cnt, q;
3705 switch (conn->type) {
3706 case ACL_LINK:
3707 cnt = hdev->acl_cnt;
3708 break;
3709 case SCO_LINK:
3710 case ESCO_LINK:
3711 cnt = hdev->sco_cnt;
3712 break;
3713 case LE_LINK:
3714 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3715 break;
3716 default:
3717 cnt = 0;
3718 BT_ERR("Unknown link type");
3721 q = cnt / num;
3722 *quote = q ? q : 1;
3723 } else
3724 *quote = 0;
3726 BT_DBG("conn %p quote %d", conn, *quote);
3727 return conn;
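/* Scheduling example for hci_low_sent(): with hdev->acl_cnt = 8 and
 * three ACL connections holding queued data, the connection with the
 * fewest packets in flight is picked and given a quote of 8 / 3 = 2
 * packets; the quote never drops below 1 as long as a connection was
 * found.
 */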
3730 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3732 struct hci_conn_hash *h = &hdev->conn_hash;
3733 struct hci_conn *c;
3735 BT_ERR("%s link tx timeout", hdev->name);
3737 rcu_read_lock();
3739 /* Kill stalled connections */
3740 list_for_each_entry_rcu(c, &h->list, list) {
3741 if (c->type == type && c->sent) {
3742 BT_ERR("%s killing stalled connection %pMR",
3743 hdev->name, &c->dst);
3744 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3748 rcu_read_unlock();
3751 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3752 int *quote)
3754 struct hci_conn_hash *h = &hdev->conn_hash;
3755 struct hci_chan *chan = NULL;
3756 unsigned int num = 0, min = ~0, cur_prio = 0;
3757 struct hci_conn *conn;
3758 int cnt, q, conn_num = 0;
3760 BT_DBG("%s", hdev->name);
3762 rcu_read_lock();
3764 list_for_each_entry_rcu(conn, &h->list, list) {
3765 struct hci_chan *tmp;
3767 if (conn->type != type)
3768 continue;
3770 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3771 continue;
3773 conn_num++;
3775 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3776 struct sk_buff *skb;
3778 if (skb_queue_empty(&tmp->data_q))
3779 continue;
3781 skb = skb_peek(&tmp->data_q);
3782 if (skb->priority < cur_prio)
3783 continue;
3785 if (skb->priority > cur_prio) {
3786 num = 0;
3787 min = ~0;
3788 cur_prio = skb->priority;
3791 num++;
3793 if (conn->sent < min) {
3794 min = conn->sent;
3795 chan = tmp;
3799 if (hci_conn_num(hdev, type) == conn_num)
3800 break;
3803 rcu_read_unlock();
3805 if (!chan)
3806 return NULL;
3808 switch (chan->conn->type) {
3809 case ACL_LINK:
3810 cnt = hdev->acl_cnt;
3811 break;
3812 case AMP_LINK:
3813 cnt = hdev->block_cnt;
3814 break;
3815 case SCO_LINK:
3816 case ESCO_LINK:
3817 cnt = hdev->sco_cnt;
3818 break;
3819 case LE_LINK:
3820 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3821 break;
3822 default:
3823 cnt = 0;
3824 BT_ERR("Unknown link type");
3827 q = cnt / num;
3828 *quote = q ? q : 1;
3829 BT_DBG("chan %p quote %d", chan, *quote);
3830 return chan;
3833 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3835 struct hci_conn_hash *h = &hdev->conn_hash;
3836 struct hci_conn *conn;
3837 int num = 0;
3839 BT_DBG("%s", hdev->name);
3841 rcu_read_lock();
3843 list_for_each_entry_rcu(conn, &h->list, list) {
3844 struct hci_chan *chan;
3846 if (conn->type != type)
3847 continue;
3849 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3850 continue;
3852 num++;
3854 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3855 struct sk_buff *skb;
3857 if (chan->sent) {
3858 chan->sent = 0;
3859 continue;
3862 if (skb_queue_empty(&chan->data_q))
3863 continue;
3865 skb = skb_peek(&chan->data_q);
3866 if (skb->priority >= HCI_PRIO_MAX - 1)
3867 continue;
3869 skb->priority = HCI_PRIO_MAX - 1;
3871 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3872 skb->priority);
3875 if (hci_conn_num(hdev, type) == num)
3876 break;
3879 rcu_read_unlock();
3883 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3885 /* Calculate count of blocks used by this packet */
3886 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3889 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3891 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3892 /* ACL tx timeout must be longer than maximum
3893 * link supervision timeout (40.9 seconds) */
3894 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3895 HCI_ACL_TX_TIMEOUT))
3896 hci_link_tx_to(hdev, ACL_LINK);
3900 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3902 unsigned int cnt = hdev->acl_cnt;
3903 struct hci_chan *chan;
3904 struct sk_buff *skb;
3905 int quote;
3907 __check_timeout(hdev, cnt);
3909 while (hdev->acl_cnt &&
3910 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3911 u32 priority = (skb_peek(&chan->data_q))->priority;
3912 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3913 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3914 skb->len, skb->priority);
3916 /* Stop if priority has changed */
3917 if (skb->priority < priority)
3918 break;
3920 skb = skb_dequeue(&chan->data_q);
3922 hci_conn_enter_active_mode(chan->conn,
3923 bt_cb(skb)->force_active);
3925 hci_send_frame(hdev, skb);
3926 hdev->acl_last_tx = jiffies;
3928 hdev->acl_cnt--;
3929 chan->sent++;
3930 chan->conn->sent++;
3934 if (cnt != hdev->acl_cnt)
3935 hci_prio_recalculate(hdev, ACL_LINK);
3938 static void hci_sched_acl_blk(struct hci_dev *hdev)
3940 unsigned int cnt = hdev->block_cnt;
3941 struct hci_chan *chan;
3942 struct sk_buff *skb;
3943 int quote;
3944 u8 type;
3946 __check_timeout(hdev, cnt);
3948 BT_DBG("%s", hdev->name);
3950 if (hdev->dev_type == HCI_AMP)
3951 type = AMP_LINK;
3952 else
3953 type = ACL_LINK;
3955 while (hdev->block_cnt > 0 &&
3956 (chan = hci_chan_sent(hdev, type, &quote))) {
3957 u32 priority = (skb_peek(&chan->data_q))->priority;
3958 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3959 int blocks;
3961 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3962 skb->len, skb->priority);
3964 /* Stop if priority has changed */
3965 if (skb->priority < priority)
3966 break;
3968 skb = skb_dequeue(&chan->data_q);
3970 blocks = __get_blocks(hdev, skb);
3971 if (blocks > hdev->block_cnt)
3972 return;
3974 hci_conn_enter_active_mode(chan->conn,
3975 bt_cb(skb)->force_active);
3977 hci_send_frame(hdev, skb);
3978 hdev->acl_last_tx = jiffies;
3980 hdev->block_cnt -= blocks;
3981 quote -= blocks;
3983 chan->sent += blocks;
3984 chan->conn->sent += blocks;
3988 if (cnt != hdev->block_cnt)
3989 hci_prio_recalculate(hdev, type);
3992 static void hci_sched_acl(struct hci_dev *hdev)
3994 BT_DBG("%s", hdev->name);
3996 /* No ACL link over BR/EDR controller */
3997 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3998 return;
4000 /* No AMP link over AMP controller */
4001 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4002 return;
4004 switch (hdev->flow_ctl_mode) {
4005 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4006 hci_sched_acl_pkt(hdev);
4007 break;
4009 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4010 hci_sched_acl_blk(hdev);
4011 break;
4015 /* Schedule SCO */
4016 static void hci_sched_sco(struct hci_dev *hdev)
4018 struct hci_conn *conn;
4019 struct sk_buff *skb;
4020 int quote;
4022 BT_DBG("%s", hdev->name);
4024 if (!hci_conn_num(hdev, SCO_LINK))
4025 return;
4027 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4028 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4029 BT_DBG("skb %p len %d", skb, skb->len);
4030 hci_send_frame(hdev, skb);
4032 conn->sent++;
4033 if (conn->sent == ~0)
4034 conn->sent = 0;
4039 static void hci_sched_esco(struct hci_dev *hdev)
4041 struct hci_conn *conn;
4042 struct sk_buff *skb;
4043 int quote;
4045 BT_DBG("%s", hdev->name);
4047 if (!hci_conn_num(hdev, ESCO_LINK))
4048 return;
4050 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4051 &quote))) {
4052 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4053 BT_DBG("skb %p len %d", skb, skb->len);
4054 hci_send_frame(hdev, skb);
4056 conn->sent++;
4057 if (conn->sent == ~0)
4058 conn->sent = 0;
4063 static void hci_sched_le(struct hci_dev *hdev)
4065 struct hci_chan *chan;
4066 struct sk_buff *skb;
4067 int quote, cnt, tmp;
4069 BT_DBG("%s", hdev->name);
4071 if (!hci_conn_num(hdev, LE_LINK))
4072 return;
4074 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4075 /* LE tx timeout must be longer than maximum
4076 * link supervision timeout (40.9 seconds) */
4077 if (!hdev->le_cnt && hdev->le_pkts &&
4078 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4079 hci_link_tx_to(hdev, LE_LINK);
4082 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4083 tmp = cnt;
4084 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4085 u32 priority = (skb_peek(&chan->data_q))->priority;
4086 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4087 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4088 skb->len, skb->priority);
4090 /* Stop if priority has changed */
4091 if (skb->priority < priority)
4092 break;
4094 skb = skb_dequeue(&chan->data_q);
4096 hci_send_frame(hdev, skb);
4097 hdev->le_last_tx = jiffies;
4099 cnt--;
4100 chan->sent++;
4101 chan->conn->sent++;
4105 if (hdev->le_pkts)
4106 hdev->le_cnt = cnt;
4107 else
4108 hdev->acl_cnt = cnt;
4110 if (cnt != tmp)
4111 hci_prio_recalculate(hdev, LE_LINK);
4114 static void hci_tx_work(struct work_struct *work)
4116 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4117 struct sk_buff *skb;
4119 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4120 hdev->sco_cnt, hdev->le_cnt);
4122 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4123 /* Schedule queues and send stuff to HCI driver */
4124 hci_sched_acl(hdev);
4125 hci_sched_sco(hdev);
4126 hci_sched_esco(hdev);
4127 hci_sched_le(hdev);
4130 /* Send next queued raw (unknown type) packet */
4131 while ((skb = skb_dequeue(&hdev->raw_q)))
4132 hci_send_frame(hdev, skb);
4135 /* ----- HCI RX task (incoming data processing) ----- */
4137 /* ACL data packet */
4138 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4140 struct hci_acl_hdr *hdr = (void *) skb->data;
4141 struct hci_conn *conn;
4142 __u16 handle, flags;
4144 skb_pull(skb, HCI_ACL_HDR_SIZE);
4146 handle = __le16_to_cpu(hdr->handle);
4147 flags = hci_flags(handle);
4148 handle = hci_handle(handle);
4150 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4151 handle, flags);
4153 hdev->stat.acl_rx++;
4155 hci_dev_lock(hdev);
4156 conn = hci_conn_hash_lookup_handle(hdev, handle);
4157 hci_dev_unlock(hdev);
4159 if (conn) {
4160 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4162 /* Send to upper protocol */
4163 l2cap_recv_acldata(conn, skb, flags);
4164 return;
4165 } else {
4166 BT_ERR("%s ACL packet for unknown connection handle %d",
4167 hdev->name, handle);
4170 kfree_skb(skb);
4173 /* SCO data packet */
4174 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4176 struct hci_sco_hdr *hdr = (void *) skb->data;
4177 struct hci_conn *conn;
4178 __u16 handle;
4180 skb_pull(skb, HCI_SCO_HDR_SIZE);
4182 handle = __le16_to_cpu(hdr->handle);
4184 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4186 hdev->stat.sco_rx++;
4188 hci_dev_lock(hdev);
4189 conn = hci_conn_hash_lookup_handle(hdev, handle);
4190 hci_dev_unlock(hdev);
4192 if (conn) {
4193 /* Send to upper protocol */
4194 sco_recv_scodata(conn, skb);
4195 return;
4196 } else {
4197 BT_ERR("%s SCO packet for unknown connection handle %d",
4198 hdev->name, handle);
4201 kfree_skb(skb);
4204 static bool hci_req_is_complete(struct hci_dev *hdev)
4206 struct sk_buff *skb;
4208 skb = skb_peek(&hdev->cmd_q);
4209 if (!skb)
4210 return true;
4212 return bt_cb(skb)->req.start;
4215 static void hci_resend_last(struct hci_dev *hdev)
4217 struct hci_command_hdr *sent;
4218 struct sk_buff *skb;
4219 u16 opcode;
4221 if (!hdev->sent_cmd)
4222 return;
4224 sent = (void *) hdev->sent_cmd->data;
4225 opcode = __le16_to_cpu(sent->opcode);
4226 if (opcode == HCI_OP_RESET)
4227 return;
4229 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4230 if (!skb)
4231 return;
4233 skb_queue_head(&hdev->cmd_q, skb);
4234 queue_work(hdev->workqueue, &hdev->cmd_work);
4237 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4238 hci_req_complete_t *req_complete,
4239 hci_req_complete_skb_t *req_complete_skb)
4241 struct sk_buff *skb;
4242 unsigned long flags;
4244 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4246 /* If the completed command doesn't match the last one that was
4247 * sent, we need to do special handling of it.
4249 if (!hci_sent_cmd_data(hdev, opcode)) {
4250 /* Some CSR based controllers generate a spontaneous
4251 * reset complete event during init and any pending
4252 * command will never be completed. In such a case we
4253 * need to resend whatever was the last sent
4254 * command.
4256 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4257 hci_resend_last(hdev);
4259 return;
4262 /* If the command succeeded and there's still more commands in
4263 * this request the request is not yet complete.
4265 if (!status && !hci_req_is_complete(hdev))
4266 return;
4268 /* If this was the last command in a request the complete
4269 * callback would be found in hdev->sent_cmd instead of the
4270 * command queue (hdev->cmd_q).
4272 if (bt_cb(hdev->sent_cmd)->req.complete) {
4273 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4274 return;
4277 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4278 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4279 return;
4282 /* Remove all pending commands belonging to this request */
4283 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4284 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4285 if (bt_cb(skb)->req.start) {
4286 __skb_queue_head(&hdev->cmd_q, skb);
4287 break;
4290 *req_complete = bt_cb(skb)->req.complete;
4291 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4292 kfree_skb(skb);
4294 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4297 static void hci_rx_work(struct work_struct *work)
4299 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4300 struct sk_buff *skb;
4302 BT_DBG("%s", hdev->name);
4304 while ((skb = skb_dequeue(&hdev->rx_q))) {
4305 /* Send copy to monitor */
4306 hci_send_to_monitor(hdev, skb);
4308 if (atomic_read(&hdev->promisc)) {
4309 /* Send copy to the sockets */
4310 hci_send_to_sock(hdev, skb);
4313 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4314 kfree_skb(skb);
4315 continue;
4318 if (test_bit(HCI_INIT, &hdev->flags)) {
4319 /* Don't process data packets in this state. */
4320 switch (bt_cb(skb)->pkt_type) {
4321 case HCI_ACLDATA_PKT:
4322 case HCI_SCODATA_PKT:
4323 kfree_skb(skb);
4324 continue;
4328 /* Process frame */
4329 switch (bt_cb(skb)->pkt_type) {
4330 case HCI_EVENT_PKT:
4331 BT_DBG("%s Event packet", hdev->name);
4332 hci_event_packet(hdev, skb);
4333 break;
4335 case HCI_ACLDATA_PKT:
4336 BT_DBG("%s ACL data packet", hdev->name);
4337 hci_acldata_packet(hdev, skb);
4338 break;
4340 case HCI_SCODATA_PKT:
4341 BT_DBG("%s SCO data packet", hdev->name);
4342 hci_scodata_packet(hdev, skb);
4343 break;
4345 default:
4346 kfree_skb(skb);
4347 break;
4352 static void hci_cmd_work(struct work_struct *work)
4354 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4355 struct sk_buff *skb;
4357 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4358 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4360 /* Send queued commands */
4361 if (atomic_read(&hdev->cmd_cnt)) {
4362 skb = skb_dequeue(&hdev->cmd_q);
4363 if (!skb)
4364 return;
4366 kfree_skb(hdev->sent_cmd);
4368 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4369 if (hdev->sent_cmd) {
4370 atomic_dec(&hdev->cmd_cnt);
4371 hci_send_frame(hdev, skb);
4372 if (test_bit(HCI_RESET, &hdev->flags))
4373 cancel_delayed_work(&hdev->cmd_timer);
4374 else
4375 schedule_delayed_work(&hdev->cmd_timer,
4376 HCI_CMD_TIMEOUT);
4377 } else {
4378 skb_queue_head(&hdev->cmd_q, skb);
4379 queue_work(hdev->workqueue, &hdev->cmd_work);