net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
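
/* Retrieve the last received HCI event for a synchronous request and
 * verify that it is either the explicitly requested event or a Command
 * Complete for the given opcode. On any mismatch the skb is freed and
 * an ERR_PTR is returned instead.
 */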
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
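
/* Send a single HCI command and block until the controller answers
 * with the requested event (or with a Command Complete when @event is
 * zero), returning the reply skb or an ERR_PTR on failure or timeout.
 */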
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
444 static void bredr_setup(struct hci_request *req)
446 __le16 param;
447 __u8 flt_type;
449 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
450 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
452 /* Read Class of Device */
453 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
455 /* Read Local Name */
456 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
458 /* Read Voice Setting */
459 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
461 /* Read Number of Supported IAC */
462 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
464 /* Read Current IAC LAP */
465 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
467 /* Clear Event Filters */
468 flt_type = HCI_FLT_CLEAR_ALL;
469 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
471 /* Connection accept timeout ~20 secs */
472 param = cpu_to_le16(0x7d00);
473 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
476 static void le_setup(struct hci_request *req)
478 struct hci_dev *hdev = req->hdev;
480 /* Read LE Buffer Size */
481 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
483 /* Read LE Local Supported Features */
484 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
486 /* Read LE Supported States */
487 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
489 /* Read LE White List Size */
490 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
492 /* Clear LE White List */
493 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
495 /* LE-only controllers have LE implicitly enabled */
496 if (!lmp_bredr_capable(hdev))
497 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
500 static void hci_setup_event_mask(struct hci_request *req)
502 struct hci_dev *hdev = req->hdev;
504 /* The second byte is 0xff instead of 0x9f (two reserved bits
505 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
506 * command otherwise.
508 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
510 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
511 * any event mask for pre 1.2 devices.
513 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
514 return;
516 if (lmp_bredr_capable(hdev)) {
517 events[4] |= 0x01; /* Flow Specification Complete */
518 events[4] |= 0x02; /* Inquiry Result with RSSI */
519 events[4] |= 0x04; /* Read Remote Extended Features Complete */
520 events[5] |= 0x08; /* Synchronous Connection Complete */
521 events[5] |= 0x10; /* Synchronous Connection Changed */
522 } else {
523 /* Use a different default for LE-only devices */
524 memset(events, 0, sizeof(events));
525 events[0] |= 0x10; /* Disconnection Complete */
526 events[1] |= 0x08; /* Read Remote Version Information Complete */
527 events[1] |= 0x20; /* Command Complete */
528 events[1] |= 0x40; /* Command Status */
529 events[1] |= 0x80; /* Hardware Error */
530 events[2] |= 0x04; /* Number of Completed Packets */
531 events[3] |= 0x02; /* Data Buffer Overflow */
533 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
534 events[0] |= 0x80; /* Encryption Change */
535 events[5] |= 0x80; /* Encryption Key Refresh Complete */
539 if (lmp_inq_rssi_capable(hdev))
540 events[4] |= 0x02; /* Inquiry Result with RSSI */
542 if (lmp_sniffsubr_capable(hdev))
543 events[5] |= 0x20; /* Sniff Subrating */
545 if (lmp_pause_enc_capable(hdev))
546 events[5] |= 0x80; /* Encryption Key Refresh Complete */
548 if (lmp_ext_inq_capable(hdev))
549 events[5] |= 0x40; /* Extended Inquiry Result */
551 if (lmp_no_flush_capable(hdev))
552 events[7] |= 0x01; /* Enhanced Flush Complete */
554 if (lmp_lsto_capable(hdev))
555 events[6] |= 0x80; /* Link Supervision Timeout Changed */
557 if (lmp_ssp_capable(hdev)) {
558 events[6] |= 0x01; /* IO Capability Request */
559 events[6] |= 0x02; /* IO Capability Response */
560 events[6] |= 0x04; /* User Confirmation Request */
561 events[6] |= 0x08; /* User Passkey Request */
562 events[6] |= 0x10; /* Remote OOB Data Request */
563 events[6] |= 0x20; /* Simple Pairing Complete */
564 events[7] |= 0x04; /* User Passkey Notification */
565 events[7] |= 0x08; /* Keypress Notification */
566 events[7] |= 0x10; /* Remote Host Supported
567 * Features Notification
571 if (lmp_le_capable(hdev))
572 events[7] |= 0x20; /* LE Meta-Event */
574 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
577 static void hci_init2_req(struct hci_request *req, unsigned long opt)
579 struct hci_dev *hdev = req->hdev;
581 if (lmp_bredr_capable(hdev))
582 bredr_setup(req);
583 else
584 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
586 if (lmp_le_capable(hdev))
587 le_setup(req);
589 /* All Bluetooth 1.2 and later controllers should support the
590 * HCI command for reading the local supported commands.
592 * Unfortunately some controllers indicate Bluetooth 1.2 support,
593 * but do not have support for this command. If that is the case,
594 * the driver can quirk the behavior and skip reading the local
595 * supported commands.
597 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
598 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
599 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
601 if (lmp_ssp_capable(hdev)) {
602 /* When SSP is available, then the host features page
603 * should be available as well. However some
604 * controllers list the max_page as 0 as long as SSP
605 * has not been enabled. To achieve proper debugging
606 * output, force the minimum max_page to 1 at least.
608 hdev->max_page = 0x01;
610 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
611 u8 mode = 0x01;
613 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
614 sizeof(mode), &mode);
615 } else {
616 struct hci_cp_write_eir cp;
618 memset(hdev->eir, 0, sizeof(hdev->eir));
619 memset(&cp, 0, sizeof(cp));
621 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
625 if (lmp_inq_rssi_capable(hdev) ||
626 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
627 u8 mode;
629 /* If Extended Inquiry Result events are supported, then
630 * they are clearly preferred over Inquiry Result with RSSI
631 * events.
633 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
635 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
638 if (lmp_inq_tx_pwr_capable(hdev))
639 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
641 if (lmp_ext_feat_capable(hdev)) {
642 struct hci_cp_read_local_ext_features cp;
644 cp.page = 0x01;
645 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
646 sizeof(cp), &cp);
649 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
650 u8 enable = 1;
651 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
652 &enable);
656 static void hci_setup_link_policy(struct hci_request *req)
658 struct hci_dev *hdev = req->hdev;
659 struct hci_cp_write_def_link_policy cp;
660 u16 link_policy = 0;
662 if (lmp_rswitch_capable(hdev))
663 link_policy |= HCI_LP_RSWITCH;
664 if (lmp_hold_capable(hdev))
665 link_policy |= HCI_LP_HOLD;
666 if (lmp_sniff_capable(hdev))
667 link_policy |= HCI_LP_SNIFF;
668 if (lmp_park_capable(hdev))
669 link_policy |= HCI_LP_PARK;
671 cp.policy = cpu_to_le16(link_policy);
672 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
675 static void hci_set_le_support(struct hci_request *req)
677 struct hci_dev *hdev = req->hdev;
678 struct hci_cp_write_le_host_supported cp;
680 /* LE-only devices do not support explicit enablement */
681 if (!lmp_bredr_capable(hdev))
682 return;
684 memset(&cp, 0, sizeof(cp));
686 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
687 cp.le = 0x01;
688 cp.simul = 0x00;
691 if (cp.le != lmp_host_le_capable(hdev))
692 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
693 &cp);
696 static void hci_set_event_mask_page_2(struct hci_request *req)
698 struct hci_dev *hdev = req->hdev;
699 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
701 /* If Connectionless Slave Broadcast master role is supported
702 * enable all necessary events for it.
704 if (lmp_csb_master_capable(hdev)) {
705 events[1] |= 0x40; /* Triggered Clock Capture */
706 events[1] |= 0x80; /* Synchronization Train Complete */
707 events[2] |= 0x10; /* Slave Page Response Timeout */
708 events[2] |= 0x20; /* CSB Channel Map Change */
711 /* If Connectionless Slave Broadcast slave role is supported
712 * enable all necessary events for it.
714 if (lmp_csb_slave_capable(hdev)) {
715 events[2] |= 0x01; /* Synchronization Train Received */
716 events[2] |= 0x02; /* CSB Receive */
717 events[2] |= 0x04; /* CSB Timeout */
718 events[2] |= 0x08; /* Truncated Page Complete */
721 /* Enable Authenticated Payload Timeout Expired event if supported */
722 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
723 events[2] |= 0x80;
725 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
728 static void hci_init3_req(struct hci_request *req, unsigned long opt)
730 struct hci_dev *hdev = req->hdev;
731 u8 p;
733 hci_setup_event_mask(req);
735 if (hdev->commands[6] & 0x20) {
736 struct hci_cp_read_stored_link_key cp;
738 bacpy(&cp.bdaddr, BDADDR_ANY);
739 cp.read_all = 0x01;
740 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
743 if (hdev->commands[5] & 0x10)
744 hci_setup_link_policy(req);
746 if (hdev->commands[8] & 0x01)
747 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
749 /* Some older Broadcom based Bluetooth 1.2 controllers do not
750 * support the Read Page Scan Type command. Check support for
751 * this command in the bit mask of supported commands.
753 if (hdev->commands[13] & 0x01)
754 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
756 if (lmp_le_capable(hdev)) {
757 u8 events[8];
759 memset(events, 0, sizeof(events));
760 events[0] = 0x0f;
762 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
763 events[0] |= 0x10; /* LE Long Term Key Request */
765 /* If controller supports the Connection Parameters Request
766 * Link Layer Procedure, enable the corresponding event.
768 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
769 events[0] |= 0x20; /* LE Remote Connection
770 * Parameter Request
773 /* If the controller supports the Data Length Extension
774 * feature, enable the corresponding event.
776 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
777 events[0] |= 0x40; /* LE Data Length Change */
779 /* If the controller supports Extended Scanner Filter
780 * Policies, enable the corresponding event.
782 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
783 events[1] |= 0x04; /* LE Direct Advertising
784 * Report
787 /* If the controller supports the LE Read Local P-256
788 * Public Key command, enable the corresponding event.
790 if (hdev->commands[34] & 0x02)
791 events[0] |= 0x80; /* LE Read Local P-256
792 * Public Key Complete
795 /* If the controller supports the LE Generate DHKey
796 * command, enable the corresponding event.
798 if (hdev->commands[34] & 0x04)
799 events[1] |= 0x01; /* LE Generate DHKey Complete */
801 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
802 events);
804 if (hdev->commands[25] & 0x40) {
805 /* Read LE Advertising Channel TX Power */
806 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
809 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
810 /* Read LE Maximum Data Length */
811 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
813 /* Read LE Suggested Default Data Length */
814 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
817 hci_set_le_support(req);
820 /* Read features beyond page 1 if available */
821 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
822 struct hci_cp_read_local_ext_features cp;
824 cp.page = p;
825 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
826 sizeof(cp), &cp);
830 static void hci_init4_req(struct hci_request *req, unsigned long opt)
832 struct hci_dev *hdev = req->hdev;
834 /* Some Broadcom based Bluetooth controllers do not support the
835 * Delete Stored Link Key command. They are clearly indicating its
836 * absence in the bit mask of supported commands.
838 * Check the supported commands and only if the command is marked
839 * as supported send it. If not supported assume that the controller
840 * does not have actual support for stored link keys which makes this
841 * command redundant anyway.
843 * Some controllers indicate that they support handling deleting
844 * stored link keys, but they don't. The quirk lets a driver
845 * just disable this command.
847 if (hdev->commands[6] & 0x80 &&
848 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
849 struct hci_cp_delete_stored_link_key cp;
851 bacpy(&cp.bdaddr, BDADDR_ANY);
852 cp.delete_all = 0x01;
853 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
854 sizeof(cp), &cp);
857 /* Set event mask page 2 if the HCI command for it is supported */
858 if (hdev->commands[22] & 0x04)
859 hci_set_event_mask_page_2(req);
861 /* Read local codec list if the HCI command is supported */
862 if (hdev->commands[29] & 0x20)
863 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
865 /* Get MWS transport configuration if the HCI command is supported */
866 if (hdev->commands[30] & 0x08)
867 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
869 /* Check for Synchronization Train support */
870 if (lmp_sync_train_capable(hdev))
871 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
873 /* Enable Secure Connections if supported and configured */
874 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
875 bredr_sc_enabled(hdev)) {
876 u8 support = 0x01;
878 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
879 sizeof(support), &support);
883 static int __hci_init(struct hci_dev *hdev)
885 int err;
887 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
888 if (err < 0)
889 return err;
891 /* The Device Under Test (DUT) mode is special and available for
892 * all controller types. So just create it early on.
894 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
895 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
896 &dut_mode_fops);
899 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
900 * BR/EDR/LE type controllers. AMP controllers only need the
901 * first stage init.
903 if (hdev->dev_type != HCI_BREDR)
904 return 0;
906 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
907 if (err < 0)
908 return err;
910 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
911 if (err < 0)
912 return err;
914 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
915 if (err < 0)
916 return err;
918 /* This function is only called when the controller is actually in
919 * configured state. When the controller is marked as unconfigured,
920 * this initialization procedure is not run.
922 * It means that it is possible that a controller runs through its
923 * setup phase and then discovers missing settings. If that is the
924 * case, then this function will not be called. It then will only
925 * be called during the config phase.
927 * So only when in setup phase or config phase, create the debugfs
928 * entries and register the SMP channels.
930 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
931 !test_bit(HCI_CONFIG, &hdev->dev_flags))
932 return 0;
934 hci_debugfs_create_common(hdev);
936 if (lmp_bredr_capable(hdev))
937 hci_debugfs_create_bredr(hdev);
939 if (lmp_le_capable(hdev))
940 hci_debugfs_create_le(hdev);
942 return 0;
945 static void hci_init0_req(struct hci_request *req, unsigned long opt)
947 struct hci_dev *hdev = req->hdev;
949 BT_DBG("%s %ld", hdev->name, opt);
951 /* Reset */
952 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
953 hci_reset_req(req, 0);
955 /* Read Local Version */
956 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
958 /* Read BD Address */
959 if (hdev->set_bdaddr)
960 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
963 static int __hci_unconf_init(struct hci_dev *hdev)
965 int err;
967 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
968 return 0;
970 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
971 if (err < 0)
972 return err;
974 return 0;
977 static void hci_scan_req(struct hci_request *req, unsigned long opt)
979 __u8 scan = opt;
981 BT_DBG("%s %x", req->hdev->name, scan);
983 /* Inquiry and Page scans */
984 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
987 static void hci_auth_req(struct hci_request *req, unsigned long opt)
989 __u8 auth = opt;
991 BT_DBG("%s %x", req->hdev->name, auth);
993 /* Authentication */
994 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
997 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
999 __u8 encrypt = opt;
1001 BT_DBG("%s %x", req->hdev->name, encrypt);
1003 /* Encryption */
1004 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1007 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1009 __le16 policy = cpu_to_le16(opt);
1011 BT_DBG("%s %x", req->hdev->name, policy);
1013 /* Default link policy */
1014 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1017 /* Get HCI device by index.
1018 * Device is held on return. */
1019 struct hci_dev *hci_dev_get(int index)
1021 struct hci_dev *hdev = NULL, *d;
1023 BT_DBG("%d", index);
1025 if (index < 0)
1026 return NULL;
1028 read_lock(&hci_dev_list_lock);
1029 list_for_each_entry(d, &hci_dev_list, list) {
1030 if (d->id == index) {
1031 hdev = hci_dev_hold(d);
1032 break;
1035 read_unlock(&hci_dev_list_lock);
1036 return hdev;
1039 /* ---- Inquiry support ---- */
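/* Return true while a discovery procedure is in its active finding or
 * name-resolving phase.
 */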
1041 bool hci_discovery_active(struct hci_dev *hdev)
1043 struct discovery_state *discov = &hdev->discovery;
1045 switch (discov->state) {
1046 case DISCOVERY_FINDING:
1047 case DISCOVERY_RESOLVING:
1048 return true;
1050 default:
1051 return false;
1055 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1057 int old_state = hdev->discovery.state;
1059 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1061 if (old_state == state)
1062 return;
1064 hdev->discovery.state = state;
1066 switch (state) {
1067 case DISCOVERY_STOPPED:
1068 hci_update_background_scan(hdev);
1070 if (old_state != DISCOVERY_STARTING)
1071 mgmt_discovering(hdev, 0);
1072 break;
1073 case DISCOVERY_STARTING:
1074 break;
1075 case DISCOVERY_FINDING:
1076 mgmt_discovering(hdev, 1);
1077 break;
1078 case DISCOVERY_RESOLVING:
1079 break;
1080 case DISCOVERY_STOPPING:
1081 break;
1085 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1087 struct discovery_state *cache = &hdev->discovery;
1088 struct inquiry_entry *p, *n;
1090 list_for_each_entry_safe(p, n, &cache->all, all) {
1091 list_del(&p->all);
1092 kfree(p);
1095 INIT_LIST_HEAD(&cache->unknown);
1096 INIT_LIST_HEAD(&cache->resolve);
1099 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1100 bdaddr_t *bdaddr)
1102 struct discovery_state *cache = &hdev->discovery;
1103 struct inquiry_entry *e;
1105 BT_DBG("cache %p, %pMR", cache, bdaddr);
1107 list_for_each_entry(e, &cache->all, all) {
1108 if (!bacmp(&e->data.bdaddr, bdaddr))
1109 return e;
1112 return NULL;
1115 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1116 bdaddr_t *bdaddr)
1118 struct discovery_state *cache = &hdev->discovery;
1119 struct inquiry_entry *e;
1121 BT_DBG("cache %p, %pMR", cache, bdaddr);
1123 list_for_each_entry(e, &cache->unknown, list) {
1124 if (!bacmp(&e->data.bdaddr, bdaddr))
1125 return e;
1128 return NULL;
1131 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1132 bdaddr_t *bdaddr,
1133 int state)
1135 struct discovery_state *cache = &hdev->discovery;
1136 struct inquiry_entry *e;
1138 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1140 list_for_each_entry(e, &cache->resolve, list) {
1141 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1142 return e;
1143 if (!bacmp(&e->data.bdaddr, bdaddr))
1144 return e;
1147 return NULL;
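/* Re-insert @ie at its proper position in the resolve list, which is
 * kept sorted by ascending absolute RSSI so that the entries with the
 * strongest signal come first.
 */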
1150 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1151 struct inquiry_entry *ie)
1153 struct discovery_state *cache = &hdev->discovery;
1154 struct list_head *pos = &cache->resolve;
1155 struct inquiry_entry *p;
1157 list_del(&ie->list);
1159 list_for_each_entry(p, &cache->resolve, list) {
1160 if (p->name_state != NAME_PENDING &&
1161 abs(p->data.rssi) >= abs(ie->data.rssi))
1162 break;
1163 pos = &p->list;
1166 list_add(&ie->list, pos);
1169 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1170 bool name_known)
1172 struct discovery_state *cache = &hdev->discovery;
1173 struct inquiry_entry *ie;
1174 u32 flags = 0;
1176 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1178 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1180 if (!data->ssp_mode)
1181 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1183 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1184 if (ie) {
1185 if (!ie->data.ssp_mode)
1186 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1188 if (ie->name_state == NAME_NEEDED &&
1189 data->rssi != ie->data.rssi) {
1190 ie->data.rssi = data->rssi;
1191 hci_inquiry_cache_update_resolve(hdev, ie);
1194 goto update;
1197 /* Entry not in the cache. Add new one. */
1198 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1199 if (!ie) {
1200 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1201 goto done;
1204 list_add(&ie->all, &cache->all);
1206 if (name_known) {
1207 ie->name_state = NAME_KNOWN;
1208 } else {
1209 ie->name_state = NAME_NOT_KNOWN;
1210 list_add(&ie->list, &cache->unknown);
1213 update:
1214 if (name_known && ie->name_state != NAME_KNOWN &&
1215 ie->name_state != NAME_PENDING) {
1216 ie->name_state = NAME_KNOWN;
1217 list_del(&ie->list);
1220 memcpy(&ie->data, data, sizeof(*data));
1221 ie->timestamp = jiffies;
1222 cache->timestamp = jiffies;
1224 if (ie->name_state == NAME_NOT_KNOWN)
1225 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1227 done:
1228 return flags;
1231 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1233 struct discovery_state *cache = &hdev->discovery;
1234 struct inquiry_info *info = (struct inquiry_info *) buf;
1235 struct inquiry_entry *e;
1236 int copied = 0;
1238 list_for_each_entry(e, &cache->all, all) {
1239 struct inquiry_data *data = &e->data;
1241 if (copied >= num)
1242 break;
1244 bacpy(&info->bdaddr, &data->bdaddr);
1245 info->pscan_rep_mode = data->pscan_rep_mode;
1246 info->pscan_period_mode = data->pscan_period_mode;
1247 info->pscan_mode = data->pscan_mode;
1248 memcpy(info->dev_class, data->dev_class, 3);
1249 info->clock_offset = data->clock_offset;
1251 info++;
1252 copied++;
1255 BT_DBG("cache %p, copied %d", cache, copied);
1256 return copied;
1259 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1261 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1262 struct hci_dev *hdev = req->hdev;
1263 struct hci_cp_inquiry cp;
1265 BT_DBG("%s", hdev->name);
1267 if (test_bit(HCI_INQUIRY, &hdev->flags))
1268 return;
1270 /* Start Inquiry */
1271 memcpy(&cp.lap, &ir->lap, 3);
1272 cp.length = ir->length;
1273 cp.num_rsp = ir->num_rsp;
1274 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1277 int hci_inquiry(void __user *arg)
1279 __u8 __user *ptr = arg;
1280 struct hci_inquiry_req ir;
1281 struct hci_dev *hdev;
1282 int err = 0, do_inquiry = 0, max_rsp;
1283 long timeo;
1284 __u8 *buf;
1286 if (copy_from_user(&ir, ptr, sizeof(ir)))
1287 return -EFAULT;
1289 hdev = hci_dev_get(ir.dev_id);
1290 if (!hdev)
1291 return -ENODEV;
1293 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1294 err = -EBUSY;
1295 goto done;
1298 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1299 err = -EOPNOTSUPP;
1300 goto done;
1303 if (hdev->dev_type != HCI_BREDR) {
1304 err = -EOPNOTSUPP;
1305 goto done;
1308 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1309 err = -EOPNOTSUPP;
1310 goto done;
1313 hci_dev_lock(hdev);
1314 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1315 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1316 hci_inquiry_cache_flush(hdev);
1317 do_inquiry = 1;
1319 hci_dev_unlock(hdev);
1321 timeo = ir.length * msecs_to_jiffies(2000);
1323 if (do_inquiry) {
1324 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1325 timeo);
1326 if (err < 0)
1327 goto done;
1329 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1330 * cleared). If it is interrupted by a signal, return -EINTR.
1332 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1333 TASK_INTERRUPTIBLE))
1334 return -EINTR;
1337 /* For an unlimited number of responses we will use a buffer with
1338 * 255 entries
1340 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1342 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1343 * then copy it to user space.
1345 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1346 if (!buf) {
1347 err = -ENOMEM;
1348 goto done;
1351 hci_dev_lock(hdev);
1352 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1353 hci_dev_unlock(hdev);
1355 BT_DBG("num_rsp %d", ir.num_rsp);
1357 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1358 ptr += sizeof(ir);
1359 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1360 ir.num_rsp))
1361 err = -EFAULT;
1362 } else
1363 err = -EFAULT;
1365 kfree(buf);
1367 done:
1368 hci_dev_put(hdev);
1369 return err;
1372 static int hci_dev_do_open(struct hci_dev *hdev)
1374 int ret = 0;
1376 BT_DBG("%s %p", hdev->name, hdev);
1378 hci_req_lock(hdev);
1380 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1381 ret = -ENODEV;
1382 goto done;
1385 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1386 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1387 /* Check for rfkill but allow the HCI setup stage to
1388 * proceed (which in itself doesn't cause any RF activity).
1390 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1391 ret = -ERFKILL;
1392 goto done;
1395 /* Check for valid public address or a configured static
1396 * random address, but let the HCI setup proceed to
1397 * be able to determine if there is a public address
1398 * or not.
1400 * In case of user channel usage, it is not important
1401 * if a public address or static random address is
1402 * available.
1404 * This check is only valid for BR/EDR controllers
1405 * since AMP controllers do not have an address.
1407 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1408 hdev->dev_type == HCI_BREDR &&
1409 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1410 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1411 ret = -EADDRNOTAVAIL;
1412 goto done;
1416 if (test_bit(HCI_UP, &hdev->flags)) {
1417 ret = -EALREADY;
1418 goto done;
1421 if (hdev->open(hdev)) {
1422 ret = -EIO;
1423 goto done;
1426 atomic_set(&hdev->cmd_cnt, 1);
1427 set_bit(HCI_INIT, &hdev->flags);
1429 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1430 if (hdev->setup)
1431 ret = hdev->setup(hdev);
1433 /* The transport driver can set these quirks before
1434 * creating the HCI device or in its setup callback.
1436 * In case any of them is set, the controller has to
1437 * start up as unconfigured.
1439 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1440 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1441 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1443 /* For an unconfigured controller it is required to
1444 * read at least the version information provided by
1445 * the Read Local Version Information command.
1447 * If the set_bdaddr driver callback is provided, then
1448 * also the original Bluetooth public device address
1449 * will be read using the Read BD Address command.
1451 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1452 ret = __hci_unconf_init(hdev);
1455 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1456 /* If public address change is configured, ensure that
1457 * the address gets programmed. If the driver does not
1458 * support changing the public address, fail the power
1459 * on procedure.
1461 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1462 hdev->set_bdaddr)
1463 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1464 else
1465 ret = -EADDRNOTAVAIL;
1468 if (!ret) {
1469 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1470 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1471 ret = __hci_init(hdev);
1474 clear_bit(HCI_INIT, &hdev->flags);
1476 if (!ret) {
1477 hci_dev_hold(hdev);
1478 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1479 set_bit(HCI_UP, &hdev->flags);
1480 hci_notify(hdev, HCI_DEV_UP);
1481 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1482 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1483 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1484 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1485 hdev->dev_type == HCI_BREDR) {
1486 hci_dev_lock(hdev);
1487 mgmt_powered(hdev, 1);
1488 hci_dev_unlock(hdev);
1490 } else {
1491 /* Init failed, cleanup */
1492 flush_work(&hdev->tx_work);
1493 flush_work(&hdev->cmd_work);
1494 flush_work(&hdev->rx_work);
1496 skb_queue_purge(&hdev->cmd_q);
1497 skb_queue_purge(&hdev->rx_q);
1499 if (hdev->flush)
1500 hdev->flush(hdev);
1502 if (hdev->sent_cmd) {
1503 kfree_skb(hdev->sent_cmd);
1504 hdev->sent_cmd = NULL;
1507 hdev->close(hdev);
1508 hdev->flags &= BIT(HCI_RAW);
1511 done:
1512 hci_req_unlock(hdev);
1513 return ret;
1516 /* ---- HCI ioctl helpers ---- */
1518 int hci_dev_open(__u16 dev)
1520 struct hci_dev *hdev;
1521 int err;
1523 hdev = hci_dev_get(dev);
1524 if (!hdev)
1525 return -ENODEV;
1527 /* Devices that are marked as unconfigured can only be powered
1528 * up as user channel. Trying to bring them up as normal devices
1529 * will result in a failure. Only user channel operation is
1530 * possible.
1532 * When this function is called for a user channel, the flag
1533 * HCI_USER_CHANNEL will be set first before attempting to
1534 * open the device.
1536 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1537 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1538 err = -EOPNOTSUPP;
1539 goto done;
1542 /* We need to ensure that no other power on/off work is pending
1543 * before proceeding to call hci_dev_do_open. This is
1544 * particularly important if the setup procedure has not yet
1545 * completed.
1547 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1548 cancel_delayed_work(&hdev->power_off);
1550 /* After this call it is guaranteed that the setup procedure
1551 * has finished. This means that error conditions like RFKILL
1552 * or no valid public or static random address apply.
1554 flush_workqueue(hdev->req_workqueue);
1556 /* For controllers not using the management interface and that
1557 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1558 * so that pairing works for them. Once the management interface
1559 * is in use this bit will be cleared again and userspace has
1560 * to explicitly enable it.
1562 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1563 !test_bit(HCI_MGMT, &hdev->dev_flags))
1564 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1566 err = hci_dev_do_open(hdev);
1568 done:
1569 hci_dev_put(hdev);
1570 return err;
1573 /* This function requires the caller holds hdev->lock */
1574 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1576 struct hci_conn_params *p;
1578 list_for_each_entry(p, &hdev->le_conn_params, list) {
1579 if (p->conn) {
1580 hci_conn_drop(p->conn);
1581 hci_conn_put(p->conn);
1582 p->conn = NULL;
1584 list_del_init(&p->action);
1587 BT_DBG("All LE pending actions cleared");
1590 static int hci_dev_do_close(struct hci_dev *hdev)
1592 BT_DBG("%s %p", hdev->name, hdev);
1594 cancel_delayed_work(&hdev->power_off);
1596 hci_req_cancel(hdev, ENODEV);
1597 hci_req_lock(hdev);
1599 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1600 cancel_delayed_work_sync(&hdev->cmd_timer);
1601 hci_req_unlock(hdev);
1602 return 0;
1605 /* Flush RX and TX works */
1606 flush_work(&hdev->tx_work);
1607 flush_work(&hdev->rx_work);
1609 if (hdev->discov_timeout > 0) {
1610 cancel_delayed_work(&hdev->discov_off);
1611 hdev->discov_timeout = 0;
1612 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1613 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1616 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1617 cancel_delayed_work(&hdev->service_cache);
1619 cancel_delayed_work_sync(&hdev->le_scan_disable);
1620 cancel_delayed_work_sync(&hdev->le_scan_restart);
1622 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1623 cancel_delayed_work_sync(&hdev->rpa_expired);
1625 /* Avoid potential lockdep warnings from the *_flush() calls by
1626 * ensuring the workqueue is empty up front.
1628 drain_workqueue(hdev->workqueue);
1630 hci_dev_lock(hdev);
1632 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1634 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1635 if (hdev->dev_type == HCI_BREDR)
1636 mgmt_powered(hdev, 0);
1639 hci_inquiry_cache_flush(hdev);
1640 hci_pend_le_actions_clear(hdev);
1641 hci_conn_hash_flush(hdev);
1642 hci_dev_unlock(hdev);
1644 smp_unregister(hdev);
1646 hci_notify(hdev, HCI_DEV_DOWN);
1648 if (hdev->flush)
1649 hdev->flush(hdev);
1651 /* Reset device */
1652 skb_queue_purge(&hdev->cmd_q);
1653 atomic_set(&hdev->cmd_cnt, 1);
1654 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1655 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1656 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1657 set_bit(HCI_INIT, &hdev->flags);
1658 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1659 clear_bit(HCI_INIT, &hdev->flags);
1662 /* flush cmd work */
1663 flush_work(&hdev->cmd_work);
1665 /* Drop queues */
1666 skb_queue_purge(&hdev->rx_q);
1667 skb_queue_purge(&hdev->cmd_q);
1668 skb_queue_purge(&hdev->raw_q);
1670 /* Drop last sent command */
1671 if (hdev->sent_cmd) {
1672 cancel_delayed_work_sync(&hdev->cmd_timer);
1673 kfree_skb(hdev->sent_cmd);
1674 hdev->sent_cmd = NULL;
1677 kfree_skb(hdev->recv_evt);
1678 hdev->recv_evt = NULL;
1680 /* After this point our queues are empty
1681 * and no tasks are scheduled. */
1682 hdev->close(hdev);
1684 /* Clear flags */
1685 hdev->flags &= BIT(HCI_RAW);
1686 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1688 /* Controller radio is available but is currently powered down */
1689 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1691 memset(hdev->eir, 0, sizeof(hdev->eir));
1692 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1693 bacpy(&hdev->random_addr, BDADDR_ANY);
1695 hci_req_unlock(hdev);
1697 hci_dev_put(hdev);
1698 return 0;
1701 int hci_dev_close(__u16 dev)
1703 struct hci_dev *hdev;
1704 int err;
1706 hdev = hci_dev_get(dev);
1707 if (!hdev)
1708 return -ENODEV;
1710 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1711 err = -EBUSY;
1712 goto done;
1715 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1716 cancel_delayed_work(&hdev->power_off);
1718 err = hci_dev_do_close(hdev);
1720 done:
1721 hci_dev_put(hdev);
1722 return err;
1725 static int hci_dev_do_reset(struct hci_dev *hdev)
1727 int ret;
1729 BT_DBG("%s %p", hdev->name, hdev);
1731 hci_req_lock(hdev);
1733 /* Drop queues */
1734 skb_queue_purge(&hdev->rx_q);
1735 skb_queue_purge(&hdev->cmd_q);
1737 /* Avoid potential lockdep warnings from the *_flush() calls by
1738 * ensuring the workqueue is empty up front.
1740 drain_workqueue(hdev->workqueue);
1742 hci_dev_lock(hdev);
1743 hci_inquiry_cache_flush(hdev);
1744 hci_conn_hash_flush(hdev);
1745 hci_dev_unlock(hdev);
1747 if (hdev->flush)
1748 hdev->flush(hdev);
1750 atomic_set(&hdev->cmd_cnt, 1);
1751 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1753 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1755 hci_req_unlock(hdev);
1756 return ret;
1759 int hci_dev_reset(__u16 dev)
1761 struct hci_dev *hdev;
1762 int err;
1764 hdev = hci_dev_get(dev);
1765 if (!hdev)
1766 return -ENODEV;
1768 if (!test_bit(HCI_UP, &hdev->flags)) {
1769 err = -ENETDOWN;
1770 goto done;
1773 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1774 err = -EBUSY;
1775 goto done;
1778 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1779 err = -EOPNOTSUPP;
1780 goto done;
1783 err = hci_dev_do_reset(hdev);
1785 done:
1786 hci_dev_put(hdev);
1787 return err;
1790 int hci_dev_reset_stat(__u16 dev)
1792 struct hci_dev *hdev;
1793 int ret = 0;
1795 hdev = hci_dev_get(dev);
1796 if (!hdev)
1797 return -ENODEV;
1799 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1800 ret = -EBUSY;
1801 goto done;
1804 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1805 ret = -EOPNOTSUPP;
1806 goto done;
1809 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1811 done:
1812 hci_dev_put(hdev);
1813 return ret;
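/* Map a raw Write Scan Enable value onto the HCI_CONNECTABLE and
 * HCI_DISCOVERABLE device flags and, when the management interface is
 * in use, emit the corresponding New Settings event.
 */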
1816 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1818 bool conn_changed, discov_changed;
1820 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1822 if ((scan & SCAN_PAGE))
1823 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1824 &hdev->dev_flags);
1825 else
1826 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1827 &hdev->dev_flags);
1829 if ((scan & SCAN_INQUIRY)) {
1830 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1831 &hdev->dev_flags);
1832 } else {
1833 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1834 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1835 &hdev->dev_flags);
1838 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1839 return;
1841 if (conn_changed || discov_changed) {
1842 /* In case this was disabled through mgmt */
1843 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1845 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1846 mgmt_update_adv_data(hdev);
1848 mgmt_new_settings(hdev);
1852 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1854 struct hci_dev *hdev;
1855 struct hci_dev_req dr;
1856 int err = 0;
1858 if (copy_from_user(&dr, arg, sizeof(dr)))
1859 return -EFAULT;
1861 hdev = hci_dev_get(dr.dev_id);
1862 if (!hdev)
1863 return -ENODEV;
1865 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1866 err = -EBUSY;
1867 goto done;
1870 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1871 err = -EOPNOTSUPP;
1872 goto done;
1875 if (hdev->dev_type != HCI_BREDR) {
1876 err = -EOPNOTSUPP;
1877 goto done;
1880 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1881 err = -EOPNOTSUPP;
1882 goto done;
1885 switch (cmd) {
1886 case HCISETAUTH:
1887 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1888 HCI_INIT_TIMEOUT);
1889 break;
1891 case HCISETENCRYPT:
1892 if (!lmp_encrypt_capable(hdev)) {
1893 err = -EOPNOTSUPP;
1894 break;
1897 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1898 /* Auth must be enabled first */
1899 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1900 HCI_INIT_TIMEOUT);
1901 if (err)
1902 break;
1905 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1906 HCI_INIT_TIMEOUT);
1907 break;
1909 case HCISETSCAN:
1910 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1911 HCI_INIT_TIMEOUT);
1913 /* Ensure that the connectable and discoverable states
1914 * get correctly modified as this was a non-mgmt change.
1916 if (!err)
1917 hci_update_scan_state(hdev, dr.dev_opt);
1918 break;
1920 case HCISETLINKPOL:
1921 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1922 HCI_INIT_TIMEOUT);
1923 break;
1925 case HCISETLINKMODE:
1926 hdev->link_mode = ((__u16) dr.dev_opt) &
1927 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1928 break;
1930 case HCISETPTYPE:
1931 hdev->pkt_type = (__u16) dr.dev_opt;
1932 break;
1934 case HCISETACLMTU:
1935 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1936 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1937 break;
1939 case HCISETSCOMTU:
1940 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1941 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1942 break;
1944 default:
1945 err = -EINVAL;
1946 break;
1949 done:
1950 hci_dev_put(hdev);
1951 return err;
1954 int hci_get_dev_list(void __user *arg)
1956 struct hci_dev *hdev;
1957 struct hci_dev_list_req *dl;
1958 struct hci_dev_req *dr;
1959 int n = 0, size, err;
1960 __u16 dev_num;
1962 if (get_user(dev_num, (__u16 __user *) arg))
1963 return -EFAULT;
1965 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1966 return -EINVAL;
1968 size = sizeof(*dl) + dev_num * sizeof(*dr);
1970 dl = kzalloc(size, GFP_KERNEL);
1971 if (!dl)
1972 return -ENOMEM;
1974 dr = dl->dev_req;
1976 read_lock(&hci_dev_list_lock);
1977 list_for_each_entry(hdev, &hci_dev_list, list) {
1978 unsigned long flags = hdev->flags;
1980 /* When the auto-off is configured it means the transport
1981 * is running, but in that case still indicate that the
1982 * device is actually down.
1984 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1985 flags &= ~BIT(HCI_UP);
1987 (dr + n)->dev_id = hdev->id;
1988 (dr + n)->dev_opt = flags;
1990 if (++n >= dev_num)
1991 break;
1993 read_unlock(&hci_dev_list_lock);
1995 dl->dev_num = n;
1996 size = sizeof(*dl) + n * sizeof(*dr);
1998 err = copy_to_user(arg, dl, size);
1999 kfree(dl);
2001 return err ? -EFAULT : 0;
2004 int hci_get_dev_info(void __user *arg)
2006 struct hci_dev *hdev;
2007 struct hci_dev_info di;
2008 unsigned long flags;
2009 int err = 0;
2011 if (copy_from_user(&di, arg, sizeof(di)))
2012 return -EFAULT;
2014 hdev = hci_dev_get(di.dev_id);
2015 if (!hdev)
2016 return -ENODEV;
2018 /* When the auto-off is configured it means the transport
2019 * is running, but in that case still indicate that the
2020 * device is actually down.
2022 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2023 flags = hdev->flags & ~BIT(HCI_UP);
2024 else
2025 flags = hdev->flags;
2027 strcpy(di.name, hdev->name);
2028 di.bdaddr = hdev->bdaddr;
2029 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2030 di.flags = flags;
2031 di.pkt_type = hdev->pkt_type;
2032 if (lmp_bredr_capable(hdev)) {
2033 di.acl_mtu = hdev->acl_mtu;
2034 di.acl_pkts = hdev->acl_pkts;
2035 di.sco_mtu = hdev->sco_mtu;
2036 di.sco_pkts = hdev->sco_pkts;
2037 } else {
2038 di.acl_mtu = hdev->le_mtu;
2039 di.acl_pkts = hdev->le_pkts;
2040 di.sco_mtu = 0;
2041 di.sco_pkts = 0;
2043 di.link_policy = hdev->link_policy;
2044 di.link_mode = hdev->link_mode;
2046 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2047 memcpy(&di.features, &hdev->features, sizeof(di.features));
2049 if (copy_to_user(arg, &di, sizeof(di)))
2050 err = -EFAULT;
2052 hci_dev_put(hdev);
2054 return err;
2057 /* ---- Interface to HCI drivers ---- */
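/* rfkill callback: power the controller down when it gets blocked,
 * unless it is still going through its setup or config stage.
 */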
2059 static int hci_rfkill_set_block(void *data, bool blocked)
2061 struct hci_dev *hdev = data;
2063 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2065 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2066 return -EBUSY;
2068 if (blocked) {
2069 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2070 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2071 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2072 hci_dev_do_close(hdev);
2073 } else {
2074 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2077 return 0;
2080 static const struct rfkill_ops hci_rfkill_ops = {
2081 .set_block = hci_rfkill_set_block,
2084 static void hci_power_on(struct work_struct *work)
2086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2087 int err;
2089 BT_DBG("%s", hdev->name);
2091 err = hci_dev_do_open(hdev);
2092 if (err < 0) {
2093 hci_dev_lock(hdev);
2094 mgmt_set_powered_failed(hdev, err);
2095 hci_dev_unlock(hdev);
2096 return;
2099 /* During the HCI setup phase, a few error conditions are
2100 * ignored and they need to be checked now. If they are still
2101 * valid, it is important to turn the device back off.
2103 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2104 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2105 (hdev->dev_type == HCI_BREDR &&
2106 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2107 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2108 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2109 hci_dev_do_close(hdev);
2110 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2111 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2112 HCI_AUTO_OFF_TIMEOUT);
2115 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2116 /* For unconfigured devices, set the HCI_RAW flag
2117 * so that userspace can easily identify them.
2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2120 set_bit(HCI_RAW, &hdev->flags);
2122 /* For fully configured devices, this will send
2123 * the Index Added event. For unconfigured devices,
2124 * it will send the Unconfigured Index Added event.
2126 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2127 * and no event will be sent.
2129 mgmt_index_added(hdev);
2130 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2131 /* When the controller is now configured, then it
2132 * is important to clear the HCI_RAW flag.
2134 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2135 clear_bit(HCI_RAW, &hdev->flags);
2137 /* Powering on the controller with HCI_CONFIG set only
2138 * happens with the transition from unconfigured to
2139 * configured. This will send the Index Added event.
2141 mgmt_index_added(hdev);
2145 static void hci_power_off(struct work_struct *work)
2147 struct hci_dev *hdev = container_of(work, struct hci_dev,
2148 power_off.work);
2150 BT_DBG("%s", hdev->name);
2152 hci_dev_do_close(hdev);
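/* Work item scheduled when the controller reports a hardware error.
 * Let the driver handle the error if it provides a hook, then cycle
 * the device by closing and re-opening it.
 */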
2155 static void hci_error_reset(struct work_struct *work)
2157 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2159 BT_DBG("%s", hdev->name);
2161 if (hdev->hw_error)
2162 hdev->hw_error(hdev, hdev->hw_error_code);
2163 else
2164 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2165 hdev->hw_error_code);
2167 if (hci_dev_do_close(hdev))
2168 return;
2170 hci_dev_do_open(hdev);
2173 static void hci_discov_off(struct work_struct *work)
2175 struct hci_dev *hdev;
2177 hdev = container_of(work, struct hci_dev, discov_off.work);
2179 BT_DBG("%s", hdev->name);
2181 mgmt_discoverable_timeout(hdev);
2184 void hci_uuids_clear(struct hci_dev *hdev)
2186 struct bt_uuid *uuid, *tmp;
2188 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2189 list_del(&uuid->list);
2190 kfree(uuid);
2194 void hci_link_keys_clear(struct hci_dev *hdev)
2196 struct link_key *key;
2198 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2199 list_del_rcu(&key->list);
2200 kfree_rcu(key, rcu);
2204 void hci_smp_ltks_clear(struct hci_dev *hdev)
2206 struct smp_ltk *k;
2208 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2209 list_del_rcu(&k->list);
2210 kfree_rcu(k, rcu);
2214 void hci_smp_irks_clear(struct hci_dev *hdev)
2216 struct smp_irk *k;
2218 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2219 list_del_rcu(&k->list);
2220 kfree_rcu(k, rcu);
2224 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2226 struct link_key *k;
2228 rcu_read_lock();
2229 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2230 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2231 rcu_read_unlock();
2232 return k;
2235 rcu_read_unlock();
2237 return NULL;
2240 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2241 u8 key_type, u8 old_key_type)
2243 /* Legacy key */
2244 if (key_type < 0x03)
2245 return true;
2247 /* Debug keys are insecure so don't store them persistently */
2248 if (key_type == HCI_LK_DEBUG_COMBINATION)
2249 return false;
2251 /* Changed combination key and there's no previous one */
2252 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2253 return false;
2255 /* Security mode 3 case */
2256 if (!conn)
2257 return true;
2259 /* BR/EDR key derived using SC from an LE link */
2260 if (conn->type == LE_LINK)
2261 return true;
2263 /* Neither local nor remote side had no-bonding as requirement */
2264 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2265 return true;
2267 /* Local side had dedicated bonding as requirement */
2268 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2269 return true;
2271 /* Remote side had dedicated bonding as requirement */
2272 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2273 return true;
2275 /* If none of the above criteria match, then don't store the key
2276 * persistently */
2277 return false;
2280 static u8 ltk_role(u8 type)
2282 if (type == SMP_LTK)
2283 return HCI_ROLE_MASTER;
2285 return HCI_ROLE_SLAVE;
2288 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2289 u8 addr_type, u8 role)
2291 struct smp_ltk *k;
2293 rcu_read_lock();
2294 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2295 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2296 continue;
2298 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2299 rcu_read_unlock();
2300 return k;
2303 rcu_read_unlock();
2305 return NULL;
2308 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2310 struct smp_irk *irk;
2312 rcu_read_lock();
2313 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2314 if (!bacmp(&irk->rpa, rpa)) {
2315 rcu_read_unlock();
2316 return irk;
2320 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2321 if (smp_irk_matches(hdev, irk->val, rpa)) {
2322 bacpy(&irk->rpa, rpa);
2323 rcu_read_unlock();
2324 return irk;
2327 rcu_read_unlock();
2329 return NULL;
2332 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2333 u8 addr_type)
2335 struct smp_irk *irk;
2337 /* Identity Address must be public or static random */
2338 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2339 return NULL;
2341 rcu_read_lock();
2342 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2343 if (addr_type == irk->addr_type &&
2344 bacmp(bdaddr, &irk->bdaddr) == 0) {
2345 rcu_read_unlock();
2346 return irk;
2349 rcu_read_unlock();
2351 return NULL;
2354 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2355 bdaddr_t *bdaddr, u8 *val, u8 type,
2356 u8 pin_len, bool *persistent)
2358 struct link_key *key, *old_key;
2359 u8 old_key_type;
2361 old_key = hci_find_link_key(hdev, bdaddr);
2362 if (old_key) {
2363 old_key_type = old_key->type;
2364 key = old_key;
2365 } else {
2366 old_key_type = conn ? conn->key_type : 0xff;
2367 key = kzalloc(sizeof(*key), GFP_KERNEL);
2368 if (!key)
2369 return NULL;
2370 list_add_rcu(&key->list, &hdev->link_keys);
2373 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2375 /* Some buggy controller combinations generate a changed
2376 * combination key for legacy pairing even when there's no
2377 * previous key */
2378 if (type == HCI_LK_CHANGED_COMBINATION &&
2379 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2380 type = HCI_LK_COMBINATION;
2381 if (conn)
2382 conn->key_type = type;
2385 bacpy(&key->bdaddr, bdaddr);
2386 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2387 key->pin_len = pin_len;
2389 if (type == HCI_LK_CHANGED_COMBINATION)
2390 key->type = old_key_type;
2391 else
2392 key->type = type;
2394 if (persistent)
2395 *persistent = hci_persistent_key(hdev, conn, type,
2396 old_key_type);
2398 return key;
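/* Illustrative sketch, not part of the original file: a hypothetical caller
 * (the function name below is invented) storing a freshly received link key
 * with hci_add_link_key() and acting on the returned persistence hint.
 * mgmt_new_link_key() is assumed to be the usual mgmt notification helper,
 * and hdev->lock is assumed to be the required lock for key list updates.
 */
#if 0
static void example_store_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *val, u8 type,
				   u8 pin_len)
{
	struct link_key *key;
	bool persistent;

	hci_dev_lock(hdev);

	key = hci_add_link_key(hdev, conn, bdaddr, val, type, pin_len,
			       &persistent);
	if (key && persistent)
		/* Only a persistent key is worth reporting to userspace */
		mgmt_new_link_key(hdev, key, persistent);

	hci_dev_unlock(hdev);
}
#endif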
2401 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402 u8 addr_type, u8 type, u8 authenticated,
2403 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2405 struct smp_ltk *key, *old_key;
2406 u8 role = ltk_role(type);
2408 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2409 if (old_key)
2410 key = old_key;
2411 else {
2412 key = kzalloc(sizeof(*key), GFP_KERNEL);
2413 if (!key)
2414 return NULL;
2415 list_add_rcu(&key->list, &hdev->long_term_keys);
2418 bacpy(&key->bdaddr, bdaddr);
2419 key->bdaddr_type = addr_type;
2420 memcpy(key->val, tk, sizeof(key->val));
2421 key->authenticated = authenticated;
2422 key->ediv = ediv;
2423 key->rand = rand;
2424 key->enc_size = enc_size;
2425 key->type = type;
2427 return key;
2430 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2431 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2433 struct smp_irk *irk;
2435 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2436 if (!irk) {
2437 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2438 if (!irk)
2439 return NULL;
2441 bacpy(&irk->bdaddr, bdaddr);
2442 irk->addr_type = addr_type;
2444 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2447 memcpy(irk->val, val, 16);
2448 bacpy(&irk->rpa, rpa);
2450 return irk;
2453 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2455 struct link_key *key;
2457 key = hci_find_link_key(hdev, bdaddr);
2458 if (!key)
2459 return -ENOENT;
2461 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2463 list_del_rcu(&key->list);
2464 kfree_rcu(key, rcu);
2466 return 0;
2469 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2471 struct smp_ltk *k;
2472 int removed = 0;
2474 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2475 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2476 continue;
2478 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2480 list_del_rcu(&k->list);
2481 kfree_rcu(k, rcu);
2482 removed++;
2485 return removed ? 0 : -ENOENT;
2488 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2490 struct smp_irk *k;
2492 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2493 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2494 continue;
2496 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2498 list_del_rcu(&k->list);
2499 kfree_rcu(k, rcu);
2503 /* HCI command timer function */
2504 static void hci_cmd_timeout(struct work_struct *work)
2506 struct hci_dev *hdev = container_of(work, struct hci_dev,
2507 cmd_timer.work);
2509 if (hdev->sent_cmd) {
2510 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2511 u16 opcode = __le16_to_cpu(sent->opcode);
2513 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2514 } else {
2515 BT_ERR("%s command tx timeout", hdev->name);
2518 atomic_set(&hdev->cmd_cnt, 1);
2519 queue_work(hdev->workqueue, &hdev->cmd_work);
2522 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2523 bdaddr_t *bdaddr, u8 bdaddr_type)
2525 struct oob_data *data;
2527 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2528 if (bacmp(bdaddr, &data->bdaddr) != 0)
2529 continue;
2530 if (data->bdaddr_type != bdaddr_type)
2531 continue;
2532 return data;
2535 return NULL;
2538 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2539 u8 bdaddr_type)
2541 struct oob_data *data;
2543 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2544 if (!data)
2545 return -ENOENT;
2547 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2549 list_del(&data->list);
2550 kfree(data);
2552 return 0;
2555 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2557 struct oob_data *data, *n;
2559 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2560 list_del(&data->list);
2561 kfree(data);
2565 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2566 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2567 u8 *hash256, u8 *rand256)
2569 struct oob_data *data;
2571 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2572 if (!data) {
2573 data = kmalloc(sizeof(*data), GFP_KERNEL);
2574 if (!data)
2575 return -ENOMEM;
2577 bacpy(&data->bdaddr, bdaddr);
2578 data->bdaddr_type = bdaddr_type;
2579 list_add(&data->list, &hdev->remote_oob_data);
2582 if (hash192 && rand192) {
2583 memcpy(data->hash192, hash192, sizeof(data->hash192));
2584 memcpy(data->rand192, rand192, sizeof(data->rand192));
2585 if (hash256 && rand256)
2586 data->present = 0x03;
2587 } else {
2588 memset(data->hash192, 0, sizeof(data->hash192));
2589 memset(data->rand192, 0, sizeof(data->rand192));
2590 if (hash256 && rand256)
2591 data->present = 0x02;
2592 else
2593 data->present = 0x00;
2596 if (hash256 && rand256) {
2597 memcpy(data->hash256, hash256, sizeof(data->hash256));
2598 memcpy(data->rand256, rand256, sizeof(data->rand256));
2599 } else {
2600 memset(data->hash256, 0, sizeof(data->hash256));
2601 memset(data->rand256, 0, sizeof(data->rand256));
2602 if (hash192 && rand192)
2603 data->present = 0x01;
2606 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2608 return 0;
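/* As the assignments above show, data->present ends up as a bitmask of which
 * OOB values were supplied: 0x00 none, 0x01 only the P-192 hash/randomizer,
 * 0x02 only the P-256 pair, and 0x03 both.
 */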
2611 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2612 bdaddr_t *bdaddr, u8 type)
2614 struct bdaddr_list *b;
2616 list_for_each_entry(b, bdaddr_list, list) {
2617 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2618 return b;
2621 return NULL;
2624 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2626 struct list_head *p, *n;
2628 list_for_each_safe(p, n, bdaddr_list) {
2629 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2631 list_del(p);
2632 kfree(b);
2636 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2638 struct bdaddr_list *entry;
2640 if (!bacmp(bdaddr, BDADDR_ANY))
2641 return -EBADF;
2643 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2644 return -EEXIST;
2646 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2647 if (!entry)
2648 return -ENOMEM;
2650 bacpy(&entry->bdaddr, bdaddr);
2651 entry->bdaddr_type = type;
2653 list_add(&entry->list, list);
2655 return 0;
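/* Illustrative sketch, not part of the original file: a caller holding
 * hdev->lock could add a peer to the connection whitelist like this. The
 * wrapper function and its error handling are invented for illustration,
 * and BDADDR_LE_PUBLIC is assumed to be the usual LE public address type
 * constant.
 */
#if 0
static int example_whitelist_peer(struct hci_dev *hdev, bdaddr_t *peer)
{
	int err;

	err = hci_bdaddr_list_add(&hdev->whitelist, peer, BDADDR_LE_PUBLIC);
	if (err == -EEXIST)
		err = 0;	/* already whitelisted is fine in this sketch */

	return err;
}
#endif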
2658 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2660 struct bdaddr_list *entry;
2662 if (!bacmp(bdaddr, BDADDR_ANY)) {
2663 hci_bdaddr_list_clear(list);
2664 return 0;
2667 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2668 if (!entry)
2669 return -ENOENT;
2671 list_del(&entry->list);
2672 kfree(entry);
2674 return 0;
2677 /* This function requires the caller holds hdev->lock */
2678 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2679 bdaddr_t *addr, u8 addr_type)
2681 struct hci_conn_params *params;
2683 /* The conn params list only contains identity addresses */
2684 if (!hci_is_identity_address(addr, addr_type))
2685 return NULL;
2687 list_for_each_entry(params, &hdev->le_conn_params, list) {
2688 if (bacmp(&params->addr, addr) == 0 &&
2689 params->addr_type == addr_type) {
2690 return params;
2694 return NULL;
2697 /* This function requires the caller holds hdev->lock */
2698 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2699 bdaddr_t *addr, u8 addr_type)
2701 struct hci_conn_params *param;
2703 /* The list only contains identity addresses */
2704 if (!hci_is_identity_address(addr, addr_type))
2705 return NULL;
2707 list_for_each_entry(param, list, action) {
2708 if (bacmp(&param->addr, addr) == 0 &&
2709 param->addr_type == addr_type)
2710 return param;
2713 return NULL;
2716 /* This function requires the caller holds hdev->lock */
2717 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2718 bdaddr_t *addr, u8 addr_type)
2720 struct hci_conn_params *params;
2722 if (!hci_is_identity_address(addr, addr_type))
2723 return NULL;
2725 params = hci_conn_params_lookup(hdev, addr, addr_type);
2726 if (params)
2727 return params;
2729 params = kzalloc(sizeof(*params), GFP_KERNEL);
2730 if (!params) {
2731 BT_ERR("Out of memory");
2732 return NULL;
2735 bacpy(&params->addr, addr);
2736 params->addr_type = addr_type;
2738 list_add(&params->list, &hdev->le_conn_params);
2739 INIT_LIST_HEAD(&params->action);
2741 params->conn_min_interval = hdev->le_conn_min_interval;
2742 params->conn_max_interval = hdev->le_conn_max_interval;
2743 params->conn_latency = hdev->le_conn_latency;
2744 params->supervision_timeout = hdev->le_supv_timeout;
2745 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2747 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2749 return params;
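/* Illustrative sketch, not part of the original file: code holding hdev->lock
 * could create connection parameters for an LE peer and mark it for automatic
 * reconnection. HCI_AUTO_CONN_ALWAYS is assumed to be one of the auto_connect
 * policies alongside the HCI_AUTO_CONN_DISABLED default set above.
 */
#if 0
static void example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr,
				 u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return;

	params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	/* Let the background scan logic pick up the new entry */
	hci_update_background_scan(hdev);
}
#endif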
2752 static void hci_conn_params_free(struct hci_conn_params *params)
2754 if (params->conn) {
2755 hci_conn_drop(params->conn);
2756 hci_conn_put(params->conn);
2759 list_del(&params->action);
2760 list_del(&params->list);
2761 kfree(params);
2764 /* This function requires the caller holds hdev->lock */
2765 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2767 struct hci_conn_params *params;
2769 params = hci_conn_params_lookup(hdev, addr, addr_type);
2770 if (!params)
2771 return;
2773 hci_conn_params_free(params);
2775 hci_update_background_scan(hdev);
2777 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2780 /* This function requires the caller holds hdev->lock */
2781 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2783 struct hci_conn_params *params, *tmp;
2785 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2786 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2787 continue;
2788 list_del(&params->list);
2789 kfree(params);
2792 BT_DBG("All LE disabled connection parameters were removed");
2795 /* This function requires the caller holds hdev->lock */
2796 void hci_conn_params_clear_all(struct hci_dev *hdev)
2798 struct hci_conn_params *params, *tmp;
2800 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2801 hci_conn_params_free(params);
2803 hci_update_background_scan(hdev);
2805 BT_DBG("All LE connection parameters were removed");
2808 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2810 if (status) {
2811 BT_ERR("Failed to start inquiry: status %d", status);
2813 hci_dev_lock(hdev);
2814 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2815 hci_dev_unlock(hdev);
2816 return;
2820 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2821 u16 opcode)
2823 /* General inquiry access code (GIAC) */
2824 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2825 struct hci_request req;
2826 struct hci_cp_inquiry cp;
2827 int err;
2829 if (status) {
2830 BT_ERR("Failed to disable LE scanning: status %d", status);
2831 return;
2834 hdev->discovery.scan_start = 0;
2836 switch (hdev->discovery.type) {
2837 case DISCOV_TYPE_LE:
2838 hci_dev_lock(hdev);
2839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2840 hci_dev_unlock(hdev);
2841 break;
2843 case DISCOV_TYPE_INTERLEAVED:
2844 hci_req_init(&req, hdev);
2846 memset(&cp, 0, sizeof(cp));
2847 memcpy(&cp.lap, lap, sizeof(cp.lap));
2848 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2849 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2851 hci_dev_lock(hdev);
2853 hci_inquiry_cache_flush(hdev);
2855 err = hci_req_run(&req, inquiry_complete);
2856 if (err) {
2857 BT_ERR("Inquiry request failed: err %d", err);
2858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2861 hci_dev_unlock(hdev);
2862 break;
2866 static void le_scan_disable_work(struct work_struct *work)
2868 struct hci_dev *hdev = container_of(work, struct hci_dev,
2869 le_scan_disable.work);
2870 struct hci_request req;
2871 int err;
2873 BT_DBG("%s", hdev->name);
2875 cancel_delayed_work_sync(&hdev->le_scan_restart);
2877 hci_req_init(&req, hdev);
2879 hci_req_add_le_scan_disable(&req);
2881 err = hci_req_run(&req, le_scan_disable_work_complete);
2882 if (err)
2883 BT_ERR("Disable LE scanning request failed: err %d", err);
2886 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2887 u16 opcode)
2889 unsigned long timeout, duration, scan_start, now;
2891 BT_DBG("%s", hdev->name);
2893 if (status) {
2894 BT_ERR("Failed to restart LE scan: status %d", status);
2895 return;
2898 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2899 !hdev->discovery.scan_start)
2900 return;
2902 /* When the scan was started, hdev->le_scan_disable was queued to run
2903 * 'duration' after scan_start. During scan restart that work was
2904 * canceled, so queue it again with the remaining timeout to make
2905 * sure the scan does not run indefinitely.
2907 duration = hdev->discovery.scan_duration;
2908 scan_start = hdev->discovery.scan_start;
2909 now = jiffies;
2910 if (now - scan_start <= duration) {
2911 int elapsed;
2913 if (now >= scan_start)
2914 elapsed = now - scan_start;
2915 else
2916 elapsed = ULONG_MAX - scan_start + now;
2918 timeout = duration - elapsed;
2919 } else {
2920 timeout = 0;
2922 queue_delayed_work(hdev->workqueue,
2923 &hdev->le_scan_disable, timeout);
2926 static void le_scan_restart_work(struct work_struct *work)
2928 struct hci_dev *hdev = container_of(work, struct hci_dev,
2929 le_scan_restart.work);
2930 struct hci_request req;
2931 struct hci_cp_le_set_scan_enable cp;
2932 int err;
2934 BT_DBG("%s", hdev->name);
2936 /* If controller is not scanning we are done. */
2937 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2938 return;
2940 hci_req_init(&req, hdev);
2942 hci_req_add_le_scan_disable(&req);
2944 memset(&cp, 0, sizeof(cp));
2945 cp.enable = LE_SCAN_ENABLE;
2946 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2947 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2949 err = hci_req_run(&req, le_scan_restart_work_complete);
2950 if (err)
2951 BT_ERR("Restart LE scan request failed: err %d", err);
2954 /* Copy the Identity Address of the controller.
2956 * If the controller has a public BD_ADDR, then by default use that one.
2957 * If this is an LE-only controller without a public address, default to
2958 * the static random address.
2960 * For debugging purposes it is possible to force controllers with a
2961 * public address to use the static random address instead.
2963 * In case BR/EDR has been disabled on a dual-mode controller and
2964 * userspace has configured a static address, then that address
2965 * becomes the identity address instead of the public BR/EDR address.
2967 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2968 u8 *bdaddr_type)
2970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2971 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2972 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2973 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2974 bacpy(bdaddr, &hdev->static_addr);
2975 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2976 } else {
2977 bacpy(bdaddr, &hdev->bdaddr);
2978 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
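/* Illustrative sketch, not part of the original file: a caller that needs the
 * controller's own address for LE purposes (for example when filling
 * advertising or connection parameters) would typically fetch it like this.
 */
#if 0
static void example_use_own_address(struct hci_dev *hdev)
{
	bdaddr_t own_addr;
	u8 own_addr_type;

	hci_copy_identity_address(hdev, &own_addr, &own_addr_type);

	/* own_addr_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM */
}
#endif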
2982 /* Alloc HCI device */
2983 struct hci_dev *hci_alloc_dev(void)
2985 struct hci_dev *hdev;
2987 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2988 if (!hdev)
2989 return NULL;
2991 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2992 hdev->esco_type = (ESCO_HV1);
2993 hdev->link_mode = (HCI_LM_ACCEPT);
2994 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2995 hdev->io_capability = 0x03; /* No Input No Output */
2996 hdev->manufacturer = 0xffff; /* Default to internal use */
2997 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2998 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3000 hdev->sniff_max_interval = 800;
3001 hdev->sniff_min_interval = 80;
3003 hdev->le_adv_channel_map = 0x07;
3004 hdev->le_adv_min_interval = 0x0800;
3005 hdev->le_adv_max_interval = 0x0800;
3006 hdev->le_scan_interval = 0x0060;
3007 hdev->le_scan_window = 0x0030;
3008 hdev->le_conn_min_interval = 0x0028;
3009 hdev->le_conn_max_interval = 0x0038;
3010 hdev->le_conn_latency = 0x0000;
3011 hdev->le_supv_timeout = 0x002a;
3012 hdev->le_def_tx_len = 0x001b;
3013 hdev->le_def_tx_time = 0x0148;
3014 hdev->le_max_tx_len = 0x001b;
3015 hdev->le_max_tx_time = 0x0148;
3016 hdev->le_max_rx_len = 0x001b;
3017 hdev->le_max_rx_time = 0x0148;
3019 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3020 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3021 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3022 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3024 mutex_init(&hdev->lock);
3025 mutex_init(&hdev->req_lock);
3027 INIT_LIST_HEAD(&hdev->mgmt_pending);
3028 INIT_LIST_HEAD(&hdev->blacklist);
3029 INIT_LIST_HEAD(&hdev->whitelist);
3030 INIT_LIST_HEAD(&hdev->uuids);
3031 INIT_LIST_HEAD(&hdev->link_keys);
3032 INIT_LIST_HEAD(&hdev->long_term_keys);
3033 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3034 INIT_LIST_HEAD(&hdev->remote_oob_data);
3035 INIT_LIST_HEAD(&hdev->le_white_list);
3036 INIT_LIST_HEAD(&hdev->le_conn_params);
3037 INIT_LIST_HEAD(&hdev->pend_le_conns);
3038 INIT_LIST_HEAD(&hdev->pend_le_reports);
3039 INIT_LIST_HEAD(&hdev->conn_hash.list);
3041 INIT_WORK(&hdev->rx_work, hci_rx_work);
3042 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3043 INIT_WORK(&hdev->tx_work, hci_tx_work);
3044 INIT_WORK(&hdev->power_on, hci_power_on);
3045 INIT_WORK(&hdev->error_reset, hci_error_reset);
3047 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3048 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3049 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3050 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3052 skb_queue_head_init(&hdev->rx_q);
3053 skb_queue_head_init(&hdev->cmd_q);
3054 skb_queue_head_init(&hdev->raw_q);
3056 init_waitqueue_head(&hdev->req_wait_q);
3058 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3060 hci_init_sysfs(hdev);
3061 discovery_init(hdev);
3063 return hdev;
3065 EXPORT_SYMBOL(hci_alloc_dev);
3067 /* Free HCI device */
3068 void hci_free_dev(struct hci_dev *hdev)
3070 /* will free via device release */
3071 put_device(&hdev->dev);
3073 EXPORT_SYMBOL(hci_free_dev);
3075 /* Register HCI device */
3076 int hci_register_dev(struct hci_dev *hdev)
3078 int id, error;
3080 if (!hdev->open || !hdev->close || !hdev->send)
3081 return -EINVAL;
3083 /* Do not allow HCI_AMP devices to register at index 0,
3084 * so the index can be used as the AMP controller ID.
3086 switch (hdev->dev_type) {
3087 case HCI_BREDR:
3088 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3089 break;
3090 case HCI_AMP:
3091 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3092 break;
3093 default:
3094 return -EINVAL;
3097 if (id < 0)
3098 return id;
3100 sprintf(hdev->name, "hci%d", id);
3101 hdev->id = id;
3103 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3105 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3106 WQ_MEM_RECLAIM, 1, hdev->name);
3107 if (!hdev->workqueue) {
3108 error = -ENOMEM;
3109 goto err;
3112 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3113 WQ_MEM_RECLAIM, 1, hdev->name);
3114 if (!hdev->req_workqueue) {
3115 destroy_workqueue(hdev->workqueue);
3116 error = -ENOMEM;
3117 goto err;
3120 if (!IS_ERR_OR_NULL(bt_debugfs))
3121 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3123 dev_set_name(&hdev->dev, "%s", hdev->name);
3125 error = device_add(&hdev->dev);
3126 if (error < 0)
3127 goto err_wqueue;
3129 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3130 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3131 hdev);
3132 if (hdev->rfkill) {
3133 if (rfkill_register(hdev->rfkill) < 0) {
3134 rfkill_destroy(hdev->rfkill);
3135 hdev->rfkill = NULL;
3139 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3140 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3142 set_bit(HCI_SETUP, &hdev->dev_flags);
3143 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3145 if (hdev->dev_type == HCI_BREDR) {
3146 /* Assume BR/EDR support until proven otherwise (such as
3147 * through reading supported features during init.
3149 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3152 write_lock(&hci_dev_list_lock);
3153 list_add(&hdev->list, &hci_dev_list);
3154 write_unlock(&hci_dev_list_lock);
3156 /* Devices that are marked for raw-only usage are unconfigured
3157 * and should not be included in normal operation.
3159 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3160 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3162 hci_notify(hdev, HCI_DEV_REG);
3163 hci_dev_hold(hdev);
3165 queue_work(hdev->req_workqueue, &hdev->power_on);
3167 return id;
3169 err_wqueue:
3170 destroy_workqueue(hdev->workqueue);
3171 destroy_workqueue(hdev->req_workqueue);
3172 err:
3173 ida_simple_remove(&hci_index_ida, hdev->id);
3175 return error;
3177 EXPORT_SYMBOL(hci_register_dev);
3179 /* Unregister HCI device */
3180 void hci_unregister_dev(struct hci_dev *hdev)
3182 int i, id;
3184 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3186 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3188 id = hdev->id;
3190 write_lock(&hci_dev_list_lock);
3191 list_del(&hdev->list);
3192 write_unlock(&hci_dev_list_lock);
3194 hci_dev_do_close(hdev);
3196 for (i = 0; i < NUM_REASSEMBLY; i++)
3197 kfree_skb(hdev->reassembly[i]);
3199 cancel_work_sync(&hdev->power_on);
3201 if (!test_bit(HCI_INIT, &hdev->flags) &&
3202 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3203 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3204 hci_dev_lock(hdev);
3205 mgmt_index_removed(hdev);
3206 hci_dev_unlock(hdev);
3209 /* mgmt_index_removed should take care of emptying the
3210 * pending list */
3211 BUG_ON(!list_empty(&hdev->mgmt_pending));
3213 hci_notify(hdev, HCI_DEV_UNREG);
3215 if (hdev->rfkill) {
3216 rfkill_unregister(hdev->rfkill);
3217 rfkill_destroy(hdev->rfkill);
3220 device_del(&hdev->dev);
3222 debugfs_remove_recursive(hdev->debugfs);
3224 destroy_workqueue(hdev->workqueue);
3225 destroy_workqueue(hdev->req_workqueue);
3227 hci_dev_lock(hdev);
3228 hci_bdaddr_list_clear(&hdev->blacklist);
3229 hci_bdaddr_list_clear(&hdev->whitelist);
3230 hci_uuids_clear(hdev);
3231 hci_link_keys_clear(hdev);
3232 hci_smp_ltks_clear(hdev);
3233 hci_smp_irks_clear(hdev);
3234 hci_remote_oob_data_clear(hdev);
3235 hci_bdaddr_list_clear(&hdev->le_white_list);
3236 hci_conn_params_clear_all(hdev);
3237 hci_discovery_filter_clear(hdev);
3238 hci_dev_unlock(hdev);
3240 hci_dev_put(hdev);
3242 ida_simple_remove(&hci_index_ida, id);
3244 EXPORT_SYMBOL(hci_unregister_dev);
3246 /* Suspend HCI device */
3247 int hci_suspend_dev(struct hci_dev *hdev)
3249 hci_notify(hdev, HCI_DEV_SUSPEND);
3250 return 0;
3252 EXPORT_SYMBOL(hci_suspend_dev);
3254 /* Resume HCI device */
3255 int hci_resume_dev(struct hci_dev *hdev)
3257 hci_notify(hdev, HCI_DEV_RESUME);
3258 return 0;
3260 EXPORT_SYMBOL(hci_resume_dev);
3262 /* Reset HCI device */
3263 int hci_reset_dev(struct hci_dev *hdev)
3265 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3266 struct sk_buff *skb;
3268 skb = bt_skb_alloc(3, GFP_ATOMIC);
3269 if (!skb)
3270 return -ENOMEM;
3272 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3273 memcpy(skb_put(skb, 3), hw_err, 3);
3275 /* Send Hardware Error to upper stack */
3276 return hci_recv_frame(hdev, skb);
3278 EXPORT_SYMBOL(hci_reset_dev);
3280 /* Receive frame from HCI drivers */
3281 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3283 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3284 && !test_bit(HCI_INIT, &hdev->flags))) {
3285 kfree_skb(skb);
3286 return -ENXIO;
3289 /* Incoming skb */
3290 bt_cb(skb)->incoming = 1;
3292 /* Time stamp */
3293 __net_timestamp(skb);
3295 skb_queue_tail(&hdev->rx_q, skb);
3296 queue_work(hdev->workqueue, &hdev->rx_work);
3298 return 0;
3300 EXPORT_SYMBOL(hci_recv_frame);
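/* Illustrative sketch, not part of the original file: a hypothetical transport
 * driver that has received a complete HCI event from its hardware could hand
 * it to the core roughly like this (buffer handling simplified; "buf" and
 * "len" are assumed to hold the raw event bytes).
 */
#if 0
static int example_driver_rx_event(struct hci_dev *hdev, const void *buf,
				   int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	/* Queues the skb on hdev->rx_q and schedules rx_work */
	return hci_recv_frame(hdev, skb);
}
#endif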
3302 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3303 int count, __u8 index)
3305 int len = 0;
3306 int hlen = 0;
3307 int remain = count;
3308 struct sk_buff *skb;
3309 struct bt_skb_cb *scb;
3311 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3312 index >= NUM_REASSEMBLY)
3313 return -EILSEQ;
3315 skb = hdev->reassembly[index];
3317 if (!skb) {
3318 switch (type) {
3319 case HCI_ACLDATA_PKT:
3320 len = HCI_MAX_FRAME_SIZE;
3321 hlen = HCI_ACL_HDR_SIZE;
3322 break;
3323 case HCI_EVENT_PKT:
3324 len = HCI_MAX_EVENT_SIZE;
3325 hlen = HCI_EVENT_HDR_SIZE;
3326 break;
3327 case HCI_SCODATA_PKT:
3328 len = HCI_MAX_SCO_SIZE;
3329 hlen = HCI_SCO_HDR_SIZE;
3330 break;
3333 skb = bt_skb_alloc(len, GFP_ATOMIC);
3334 if (!skb)
3335 return -ENOMEM;
3337 scb = (void *) skb->cb;
3338 scb->expect = hlen;
3339 scb->pkt_type = type;
3341 hdev->reassembly[index] = skb;
3344 while (count) {
3345 scb = (void *) skb->cb;
3346 len = min_t(uint, scb->expect, count);
3348 memcpy(skb_put(skb, len), data, len);
3350 count -= len;
3351 data += len;
3352 scb->expect -= len;
3353 remain = count;
3355 switch (type) {
3356 case HCI_EVENT_PKT:
3357 if (skb->len == HCI_EVENT_HDR_SIZE) {
3358 struct hci_event_hdr *h = hci_event_hdr(skb);
3359 scb->expect = h->plen;
3361 if (skb_tailroom(skb) < scb->expect) {
3362 kfree_skb(skb);
3363 hdev->reassembly[index] = NULL;
3364 return -ENOMEM;
3367 break;
3369 case HCI_ACLDATA_PKT:
3370 if (skb->len == HCI_ACL_HDR_SIZE) {
3371 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3372 scb->expect = __le16_to_cpu(h->dlen);
3374 if (skb_tailroom(skb) < scb->expect) {
3375 kfree_skb(skb);
3376 hdev->reassembly[index] = NULL;
3377 return -ENOMEM;
3380 break;
3382 case HCI_SCODATA_PKT:
3383 if (skb->len == HCI_SCO_HDR_SIZE) {
3384 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3385 scb->expect = h->dlen;
3387 if (skb_tailroom(skb) < scb->expect) {
3388 kfree_skb(skb);
3389 hdev->reassembly[index] = NULL;
3390 return -ENOMEM;
3393 break;
3396 if (scb->expect == 0) {
3397 /* Complete frame */
3399 bt_cb(skb)->pkt_type = type;
3400 hci_recv_frame(hdev, skb);
3402 hdev->reassembly[index] = NULL;
3403 return remain;
3407 return remain;
3410 #define STREAM_REASSEMBLY 0
3412 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3414 int type;
3415 int rem = 0;
3417 while (count) {
3418 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3420 if (!skb) {
3421 struct { char type; } *pkt;
3423 /* Start of the frame */
3424 pkt = data;
3425 type = pkt->type;
3427 data++;
3428 count--;
3429 } else
3430 type = bt_cb(skb)->pkt_type;
3432 rem = hci_reassembly(hdev, type, data, count,
3433 STREAM_REASSEMBLY);
3434 if (rem < 0)
3435 return rem;
3437 data += (count - rem);
3438 count = rem;
3441 return rem;
3443 EXPORT_SYMBOL(hci_recv_stream_fragment);
3445 /* ---- Interface to upper protocols ---- */
3447 int hci_register_cb(struct hci_cb *cb)
3449 BT_DBG("%p name %s", cb, cb->name);
3451 write_lock(&hci_cb_list_lock);
3452 list_add(&cb->list, &hci_cb_list);
3453 write_unlock(&hci_cb_list_lock);
3455 return 0;
3457 EXPORT_SYMBOL(hci_register_cb);
3459 int hci_unregister_cb(struct hci_cb *cb)
3461 BT_DBG("%p name %s", cb, cb->name);
3463 write_lock(&hci_cb_list_lock);
3464 list_del(&cb->list);
3465 write_unlock(&hci_cb_list_lock);
3467 return 0;
3469 EXPORT_SYMBOL(hci_unregister_cb);
3471 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3473 int err;
3475 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3477 /* Time stamp */
3478 __net_timestamp(skb);
3480 /* Send copy to monitor */
3481 hci_send_to_monitor(hdev, skb);
3483 if (atomic_read(&hdev->promisc)) {
3484 /* Send copy to the sockets */
3485 hci_send_to_sock(hdev, skb);
3488 /* Get rid of skb owner, prior to sending to the driver. */
3489 skb_orphan(skb);
3491 err = hdev->send(hdev, skb);
3492 if (err < 0) {
3493 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3494 kfree_skb(skb);
3498 bool hci_req_pending(struct hci_dev *hdev)
3500 return (hdev->req_status == HCI_REQ_PEND);
3503 /* Send HCI command */
3504 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3505 const void *param)
3507 struct sk_buff *skb;
3509 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3511 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3512 if (!skb) {
3513 BT_ERR("%s no memory for command", hdev->name);
3514 return -ENOMEM;
3517 /* Stand-alone HCI commands must be flagged as
3518 * single-command requests.
3520 bt_cb(skb)->req.start = true;
3522 skb_queue_tail(&hdev->cmd_q, skb);
3523 queue_work(hdev->workqueue, &hdev->cmd_work);
3525 return 0;
3528 /* Get data from the previously sent command */
3529 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3531 struct hci_command_hdr *hdr;
3533 if (!hdev->sent_cmd)
3534 return NULL;
3536 hdr = (void *) hdev->sent_cmd->data;
3538 if (hdr->opcode != cpu_to_le16(opcode))
3539 return NULL;
3541 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3543 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
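/* Illustrative sketch, not part of the original file: an event handler for an
 * Inquiry Complete style event could recover the parameters of the command
 * that triggered it; hci_cp_inquiry and HCI_OP_INQUIRY are the same
 * definitions used elsewhere in this file.
 */
#if 0
static void example_inquiry_complete(struct hci_dev *hdev)
{
	struct hci_cp_inquiry *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
	if (!cp)
		return;	/* reply does not match the last sent command */

	/* cp->lap and cp->length hold the parameters originally sent */
}
#endif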
3546 /* Send ACL data */
3547 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3549 struct hci_acl_hdr *hdr;
3550 int len = skb->len;
3552 skb_push(skb, HCI_ACL_HDR_SIZE);
3553 skb_reset_transport_header(skb);
3554 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3555 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3556 hdr->dlen = cpu_to_le16(len);
3559 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3560 struct sk_buff *skb, __u16 flags)
3562 struct hci_conn *conn = chan->conn;
3563 struct hci_dev *hdev = conn->hdev;
3564 struct sk_buff *list;
3566 skb->len = skb_headlen(skb);
3567 skb->data_len = 0;
3569 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3571 switch (hdev->dev_type) {
3572 case HCI_BREDR:
3573 hci_add_acl_hdr(skb, conn->handle, flags);
3574 break;
3575 case HCI_AMP:
3576 hci_add_acl_hdr(skb, chan->handle, flags);
3577 break;
3578 default:
3579 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3580 return;
3583 list = skb_shinfo(skb)->frag_list;
3584 if (!list) {
3585 /* Non fragmented */
3586 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3588 skb_queue_tail(queue, skb);
3589 } else {
3590 /* Fragmented */
3591 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3593 skb_shinfo(skb)->frag_list = NULL;
3595 /* Queue all fragments atomically. We need to use spin_lock_bh
3596 * here because of 6LoWPAN links, as there this function is
3597 * called from softirq and using normal spin lock could cause
3598 * deadlocks.
3600 spin_lock_bh(&queue->lock);
3602 __skb_queue_tail(queue, skb);
3604 flags &= ~ACL_START;
3605 flags |= ACL_CONT;
3606 do {
3607 skb = list; list = list->next;
3609 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3610 hci_add_acl_hdr(skb, conn->handle, flags);
3612 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3614 __skb_queue_tail(queue, skb);
3615 } while (list);
3617 spin_unlock_bh(&queue->lock);
3621 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3623 struct hci_dev *hdev = chan->conn->hdev;
3625 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3627 hci_queue_acl(chan, &chan->data_q, skb, flags);
3629 queue_work(hdev->workqueue, &hdev->tx_work);
3632 /* Send SCO data */
3633 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3635 struct hci_dev *hdev = conn->hdev;
3636 struct hci_sco_hdr hdr;
3638 BT_DBG("%s len %d", hdev->name, skb->len);
3640 hdr.handle = cpu_to_le16(conn->handle);
3641 hdr.dlen = skb->len;
3643 skb_push(skb, HCI_SCO_HDR_SIZE);
3644 skb_reset_transport_header(skb);
3645 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3647 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3649 skb_queue_tail(&conn->data_q, skb);
3650 queue_work(hdev->workqueue, &hdev->tx_work);
3653 /* ---- HCI TX task (outgoing data) ---- */
3655 /* HCI Connection scheduler */
3656 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3657 int *quote)
3659 struct hci_conn_hash *h = &hdev->conn_hash;
3660 struct hci_conn *conn = NULL, *c;
3661 unsigned int num = 0, min = ~0;
3663 /* We don't have to lock device here. Connections are always
3664 * added and removed with TX task disabled. */
3666 rcu_read_lock();
3668 list_for_each_entry_rcu(c, &h->list, list) {
3669 if (c->type != type || skb_queue_empty(&c->data_q))
3670 continue;
3672 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3673 continue;
3675 num++;
3677 if (c->sent < min) {
3678 min = c->sent;
3679 conn = c;
3682 if (hci_conn_num(hdev, type) == num)
3683 break;
3686 rcu_read_unlock();
3688 if (conn) {
3689 int cnt, q;
3691 switch (conn->type) {
3692 case ACL_LINK:
3693 cnt = hdev->acl_cnt;
3694 break;
3695 case SCO_LINK:
3696 case ESCO_LINK:
3697 cnt = hdev->sco_cnt;
3698 break;
3699 case LE_LINK:
3700 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3701 break;
3702 default:
3703 cnt = 0;
3704 BT_ERR("Unknown link type");
3707 q = cnt / num;
3708 *quote = q ? q : 1;
3709 } else
3710 *quote = 0;
3712 BT_DBG("conn %p quote %d", conn, *quote);
3713 return conn;
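/* A worked example of the quote calculation above: with hdev->acl_cnt == 8
 * free ACL buffers and three connected ACL links, num == 3 and the least-busy
 * connection is granted a quote of 8 / 3 == 2 packets for this round; the
 * q ? q : 1 fallback guarantees at least one packet even when cnt < num.
 */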
3716 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3718 struct hci_conn_hash *h = &hdev->conn_hash;
3719 struct hci_conn *c;
3721 BT_ERR("%s link tx timeout", hdev->name);
3723 rcu_read_lock();
3725 /* Kill stalled connections */
3726 list_for_each_entry_rcu(c, &h->list, list) {
3727 if (c->type == type && c->sent) {
3728 BT_ERR("%s killing stalled connection %pMR",
3729 hdev->name, &c->dst);
3730 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3734 rcu_read_unlock();
3737 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3738 int *quote)
3740 struct hci_conn_hash *h = &hdev->conn_hash;
3741 struct hci_chan *chan = NULL;
3742 unsigned int num = 0, min = ~0, cur_prio = 0;
3743 struct hci_conn *conn;
3744 int cnt, q, conn_num = 0;
3746 BT_DBG("%s", hdev->name);
3748 rcu_read_lock();
3750 list_for_each_entry_rcu(conn, &h->list, list) {
3751 struct hci_chan *tmp;
3753 if (conn->type != type)
3754 continue;
3756 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3757 continue;
3759 conn_num++;
3761 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3762 struct sk_buff *skb;
3764 if (skb_queue_empty(&tmp->data_q))
3765 continue;
3767 skb = skb_peek(&tmp->data_q);
3768 if (skb->priority < cur_prio)
3769 continue;
3771 if (skb->priority > cur_prio) {
3772 num = 0;
3773 min = ~0;
3774 cur_prio = skb->priority;
3777 num++;
3779 if (conn->sent < min) {
3780 min = conn->sent;
3781 chan = tmp;
3785 if (hci_conn_num(hdev, type) == conn_num)
3786 break;
3789 rcu_read_unlock();
3791 if (!chan)
3792 return NULL;
3794 switch (chan->conn->type) {
3795 case ACL_LINK:
3796 cnt = hdev->acl_cnt;
3797 break;
3798 case AMP_LINK:
3799 cnt = hdev->block_cnt;
3800 break;
3801 case SCO_LINK:
3802 case ESCO_LINK:
3803 cnt = hdev->sco_cnt;
3804 break;
3805 case LE_LINK:
3806 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3807 break;
3808 default:
3809 cnt = 0;
3810 BT_ERR("Unknown link type");
3813 q = cnt / num;
3814 *quote = q ? q : 1;
3815 BT_DBG("chan %p quote %d", chan, *quote);
3816 return chan;
3819 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3821 struct hci_conn_hash *h = &hdev->conn_hash;
3822 struct hci_conn *conn;
3823 int num = 0;
3825 BT_DBG("%s", hdev->name);
3827 rcu_read_lock();
3829 list_for_each_entry_rcu(conn, &h->list, list) {
3830 struct hci_chan *chan;
3832 if (conn->type != type)
3833 continue;
3835 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3836 continue;
3838 num++;
3840 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3841 struct sk_buff *skb;
3843 if (chan->sent) {
3844 chan->sent = 0;
3845 continue;
3848 if (skb_queue_empty(&chan->data_q))
3849 continue;
3851 skb = skb_peek(&chan->data_q);
3852 if (skb->priority >= HCI_PRIO_MAX - 1)
3853 continue;
3855 skb->priority = HCI_PRIO_MAX - 1;
3857 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3858 skb->priority);
3861 if (hci_conn_num(hdev, type) == num)
3862 break;
3865 rcu_read_unlock();
3869 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3871 /* Calculate count of blocks used by this packet */
3872 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
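/* A worked example of the block arithmetic above: with hdev->block_len == 339
 * and an ACL frame of skb->len == 1024 bytes (4-byte ACL header plus 1020
 * bytes of payload), DIV_ROUND_UP(1020, 339) charges 4 controller buffer
 * blocks for the packet.
 */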
3875 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3877 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3878 /* ACL tx timeout must be longer than maximum
3879 * link supervision timeout (40.9 seconds) */
3880 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3881 HCI_ACL_TX_TIMEOUT))
3882 hci_link_tx_to(hdev, ACL_LINK);
3886 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3888 unsigned int cnt = hdev->acl_cnt;
3889 struct hci_chan *chan;
3890 struct sk_buff *skb;
3891 int quote;
3893 __check_timeout(hdev, cnt);
3895 while (hdev->acl_cnt &&
3896 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3897 u32 priority = (skb_peek(&chan->data_q))->priority;
3898 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3899 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3900 skb->len, skb->priority);
3902 /* Stop if priority has changed */
3903 if (skb->priority < priority)
3904 break;
3906 skb = skb_dequeue(&chan->data_q);
3908 hci_conn_enter_active_mode(chan->conn,
3909 bt_cb(skb)->force_active);
3911 hci_send_frame(hdev, skb);
3912 hdev->acl_last_tx = jiffies;
3914 hdev->acl_cnt--;
3915 chan->sent++;
3916 chan->conn->sent++;
3920 if (cnt != hdev->acl_cnt)
3921 hci_prio_recalculate(hdev, ACL_LINK);
3924 static void hci_sched_acl_blk(struct hci_dev *hdev)
3926 unsigned int cnt = hdev->block_cnt;
3927 struct hci_chan *chan;
3928 struct sk_buff *skb;
3929 int quote;
3930 u8 type;
3932 __check_timeout(hdev, cnt);
3934 BT_DBG("%s", hdev->name);
3936 if (hdev->dev_type == HCI_AMP)
3937 type = AMP_LINK;
3938 else
3939 type = ACL_LINK;
3941 while (hdev->block_cnt > 0 &&
3942 (chan = hci_chan_sent(hdev, type, &quote))) {
3943 u32 priority = (skb_peek(&chan->data_q))->priority;
3944 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3945 int blocks;
3947 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3948 skb->len, skb->priority);
3950 /* Stop if priority has changed */
3951 if (skb->priority < priority)
3952 break;
3954 skb = skb_dequeue(&chan->data_q);
3956 blocks = __get_blocks(hdev, skb);
3957 if (blocks > hdev->block_cnt)
3958 return;
3960 hci_conn_enter_active_mode(chan->conn,
3961 bt_cb(skb)->force_active);
3963 hci_send_frame(hdev, skb);
3964 hdev->acl_last_tx = jiffies;
3966 hdev->block_cnt -= blocks;
3967 quote -= blocks;
3969 chan->sent += blocks;
3970 chan->conn->sent += blocks;
3974 if (cnt != hdev->block_cnt)
3975 hci_prio_recalculate(hdev, type);
3978 static void hci_sched_acl(struct hci_dev *hdev)
3980 BT_DBG("%s", hdev->name);
3982 /* No ACL link over BR/EDR controller */
3983 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3984 return;
3986 /* No AMP link over AMP controller */
3987 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3988 return;
3990 switch (hdev->flow_ctl_mode) {
3991 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3992 hci_sched_acl_pkt(hdev);
3993 break;
3995 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3996 hci_sched_acl_blk(hdev);
3997 break;
4001 /* Schedule SCO */
4002 static void hci_sched_sco(struct hci_dev *hdev)
4004 struct hci_conn *conn;
4005 struct sk_buff *skb;
4006 int quote;
4008 BT_DBG("%s", hdev->name);
4010 if (!hci_conn_num(hdev, SCO_LINK))
4011 return;
4013 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4014 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4015 BT_DBG("skb %p len %d", skb, skb->len);
4016 hci_send_frame(hdev, skb);
4018 conn->sent++;
4019 if (conn->sent == ~0)
4020 conn->sent = 0;
4025 static void hci_sched_esco(struct hci_dev *hdev)
4027 struct hci_conn *conn;
4028 struct sk_buff *skb;
4029 int quote;
4031 BT_DBG("%s", hdev->name);
4033 if (!hci_conn_num(hdev, ESCO_LINK))
4034 return;
4036 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4037 &quote))) {
4038 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4039 BT_DBG("skb %p len %d", skb, skb->len);
4040 hci_send_frame(hdev, skb);
4042 conn->sent++;
4043 if (conn->sent == ~0)
4044 conn->sent = 0;
4049 static void hci_sched_le(struct hci_dev *hdev)
4051 struct hci_chan *chan;
4052 struct sk_buff *skb;
4053 int quote, cnt, tmp;
4055 BT_DBG("%s", hdev->name);
4057 if (!hci_conn_num(hdev, LE_LINK))
4058 return;
4060 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4061 /* LE tx timeout must be longer than maximum
4062 * link supervision timeout (40.9 seconds) */
4063 if (!hdev->le_cnt && hdev->le_pkts &&
4064 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4065 hci_link_tx_to(hdev, LE_LINK);
4068 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4069 tmp = cnt;
4070 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4071 u32 priority = (skb_peek(&chan->data_q))->priority;
4072 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4073 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4074 skb->len, skb->priority);
4076 /* Stop if priority has changed */
4077 if (skb->priority < priority)
4078 break;
4080 skb = skb_dequeue(&chan->data_q);
4082 hci_send_frame(hdev, skb);
4083 hdev->le_last_tx = jiffies;
4085 cnt--;
4086 chan->sent++;
4087 chan->conn->sent++;
4091 if (hdev->le_pkts)
4092 hdev->le_cnt = cnt;
4093 else
4094 hdev->acl_cnt = cnt;
4096 if (cnt != tmp)
4097 hci_prio_recalculate(hdev, LE_LINK);
4100 static void hci_tx_work(struct work_struct *work)
4102 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4103 struct sk_buff *skb;
4105 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4106 hdev->sco_cnt, hdev->le_cnt);
4108 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4109 /* Schedule queues and send stuff to HCI driver */
4110 hci_sched_acl(hdev);
4111 hci_sched_sco(hdev);
4112 hci_sched_esco(hdev);
4113 hci_sched_le(hdev);
4116 /* Send next queued raw (unknown type) packet */
4117 while ((skb = skb_dequeue(&hdev->raw_q)))
4118 hci_send_frame(hdev, skb);
4121 /* ----- HCI RX task (incoming data processing) ----- */
4123 /* ACL data packet */
4124 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4126 struct hci_acl_hdr *hdr = (void *) skb->data;
4127 struct hci_conn *conn;
4128 __u16 handle, flags;
4130 skb_pull(skb, HCI_ACL_HDR_SIZE);
4132 handle = __le16_to_cpu(hdr->handle);
4133 flags = hci_flags(handle);
4134 handle = hci_handle(handle);
4136 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4137 handle, flags);
4139 hdev->stat.acl_rx++;
4141 hci_dev_lock(hdev);
4142 conn = hci_conn_hash_lookup_handle(hdev, handle);
4143 hci_dev_unlock(hdev);
4145 if (conn) {
4146 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4148 /* Send to upper protocol */
4149 l2cap_recv_acldata(conn, skb, flags);
4150 return;
4151 } else {
4152 BT_ERR("%s ACL packet for unknown connection handle %d",
4153 hdev->name, handle);
4156 kfree_skb(skb);
4159 /* SCO data packet */
4160 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4162 struct hci_sco_hdr *hdr = (void *) skb->data;
4163 struct hci_conn *conn;
4164 __u16 handle;
4166 skb_pull(skb, HCI_SCO_HDR_SIZE);
4168 handle = __le16_to_cpu(hdr->handle);
4170 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4172 hdev->stat.sco_rx++;
4174 hci_dev_lock(hdev);
4175 conn = hci_conn_hash_lookup_handle(hdev, handle);
4176 hci_dev_unlock(hdev);
4178 if (conn) {
4179 /* Send to upper protocol */
4180 sco_recv_scodata(conn, skb);
4181 return;
4182 } else {
4183 BT_ERR("%s SCO packet for unknown connection handle %d",
4184 hdev->name, handle);
4187 kfree_skb(skb);
4190 static bool hci_req_is_complete(struct hci_dev *hdev)
4192 struct sk_buff *skb;
4194 skb = skb_peek(&hdev->cmd_q);
4195 if (!skb)
4196 return true;
4198 return bt_cb(skb)->req.start;
4201 static void hci_resend_last(struct hci_dev *hdev)
4203 struct hci_command_hdr *sent;
4204 struct sk_buff *skb;
4205 u16 opcode;
4207 if (!hdev->sent_cmd)
4208 return;
4210 sent = (void *) hdev->sent_cmd->data;
4211 opcode = __le16_to_cpu(sent->opcode);
4212 if (opcode == HCI_OP_RESET)
4213 return;
4215 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4216 if (!skb)
4217 return;
4219 skb_queue_head(&hdev->cmd_q, skb);
4220 queue_work(hdev->workqueue, &hdev->cmd_work);
4223 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4225 hci_req_complete_t req_complete = NULL;
4226 struct sk_buff *skb;
4227 unsigned long flags;
4229 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4231 /* If the completed command doesn't match the last one that was
4232 * sent, we need to do special handling of it.
4234 if (!hci_sent_cmd_data(hdev, opcode)) {
4235 /* Some CSR based controllers generate a spontaneous
4236 * reset complete event during init and any pending
4237 * command will never be completed. In such a case we
4238 * need to resend whatever was the last sent
4239 * command.
4241 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4242 hci_resend_last(hdev);
4244 return;
4247 /* If the command succeeded and there's still more commands in
4248 * this request the request is not yet complete.
4250 if (!status && !hci_req_is_complete(hdev))
4251 return;
4253 /* If this was the last command in a request the complete
4254 * callback would be found in hdev->sent_cmd instead of the
4255 * command queue (hdev->cmd_q).
4257 if (hdev->sent_cmd) {
4258 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4260 if (req_complete) {
4261 /* We must set the complete callback to NULL to
4262 * avoid calling the callback more than once if
4263 * this function gets called again.
4265 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4267 goto call_complete;
4271 /* Remove all pending commands belonging to this request */
4272 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4273 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4274 if (bt_cb(skb)->req.start) {
4275 __skb_queue_head(&hdev->cmd_q, skb);
4276 break;
4279 req_complete = bt_cb(skb)->req.complete;
4280 kfree_skb(skb);
4282 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4284 call_complete:
4285 if (req_complete)
4286 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4289 static void hci_rx_work(struct work_struct *work)
4291 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4292 struct sk_buff *skb;
4294 BT_DBG("%s", hdev->name);
4296 while ((skb = skb_dequeue(&hdev->rx_q))) {
4297 /* Send copy to monitor */
4298 hci_send_to_monitor(hdev, skb);
4300 if (atomic_read(&hdev->promisc)) {
4301 /* Send copy to the sockets */
4302 hci_send_to_sock(hdev, skb);
4305 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4306 kfree_skb(skb);
4307 continue;
4310 if (test_bit(HCI_INIT, &hdev->flags)) {
4311 /* Don't process data packets in this state. */
4312 switch (bt_cb(skb)->pkt_type) {
4313 case HCI_ACLDATA_PKT:
4314 case HCI_SCODATA_PKT:
4315 kfree_skb(skb);
4316 continue;
4320 /* Process frame */
4321 switch (bt_cb(skb)->pkt_type) {
4322 case HCI_EVENT_PKT:
4323 BT_DBG("%s Event packet", hdev->name);
4324 hci_event_packet(hdev, skb);
4325 break;
4327 case HCI_ACLDATA_PKT:
4328 BT_DBG("%s ACL data packet", hdev->name);
4329 hci_acldata_packet(hdev, skb);
4330 break;
4332 case HCI_SCODATA_PKT:
4333 BT_DBG("%s SCO data packet", hdev->name);
4334 hci_scodata_packet(hdev, skb);
4335 break;
4337 default:
4338 kfree_skb(skb);
4339 break;
4344 static void hci_cmd_work(struct work_struct *work)
4346 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4347 struct sk_buff *skb;
4349 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4350 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4352 /* Send queued commands */
4353 if (atomic_read(&hdev->cmd_cnt)) {
4354 skb = skb_dequeue(&hdev->cmd_q);
4355 if (!skb)
4356 return;
4358 kfree_skb(hdev->sent_cmd);
4360 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4361 if (hdev->sent_cmd) {
4362 atomic_dec(&hdev->cmd_cnt);
4363 hci_send_frame(hdev, skb);
4364 if (test_bit(HCI_RESET, &hdev->flags))
4365 cancel_delayed_work(&hdev->cmd_timer);
4366 else
4367 schedule_delayed_work(&hdev->cmd_timer,
4368 HCI_CMD_TIMEOUT);
4369 } else {
4370 skb_queue_head(&hdev->cmd_q, skb);
4371 queue_work(hdev->workqueue, &hdev->cmd_work);