net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

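/* req_run() is the common backend for hci_req_run() and hci_req_run_skb().
 * It attaches the completion callback to the last command of the request,
 * splices the request's command queue onto hdev->cmd_q under the queue lock
 * and kicks hdev->cmd_work so the queued commands get sent to the controller.
 */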
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

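/* Completion callback used by the synchronous request helpers. It stores the
 * result (and, if present, the response skb) in hdev and wakes up any waiter
 * sleeping on hdev->req_wait_q.
 */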
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

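/* Send a single HCI command and block until the matching event arrives or the
 * timeout expires. Returns the event skb on success or an ERR_PTR() value on
 * failure.
 */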
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

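/* Allocate an skb containing an HCI command header plus the parameter
 * payload, ready to be queued on a request or directly on hdev->cmd_q.
 */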
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

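/* Synchronize the controller white list with hdev->pend_le_conns and
 * hdev->pend_le_reports. Returns the scan filter policy to use: 0x01 when
 * the white list can be used, or 0x00 when scanning has to accept all
 * advertising, e.g. because the list is too small or an entry uses an RPA.
 */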
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

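/* Queue the commands to (re)enable legacy advertising, deriving the own
 * address type, the advertising type (ADV_IND, ADV_SCAN_IND or
 * ADV_NONCONN_IND) and the intervals from the current settings and the
 * flags of the current advertising instance.
 */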
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t complete_len;
	size_t short_len;
	int max_len;

	max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
	complete_len = strlen(hdev->dev_name);
	short_len = strlen(hdev->short_name);

	/* no space left for name */
	if (max_len < 1)
		return ad_len;

	/* no name set */
	if (!complete_len)
		return ad_len;

	/* complete name fits and is eq to max short name len or smaller */
	if (complete_len <= max_len &&
	    complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len);
	}

	/* short name set and fits */
	if (short_len && short_len <= max_len) {
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len);
	}

	/* no short name set so shorten complete name */
	if (!short_len) {
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->dev_name, max_len);
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

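/* Decide which own address type to use for the next air operation and, when
 * needed, queue a command to program a new random address. The choice is, in
 * order: a resolvable private address, a non-resolvable private address when
 * privacy is required, the configured static address, or the public address.
 */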
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

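/* Check whether any device on the BR/EDR whitelist currently lacks an
 * established ACL connection, in which case page scanning needs to stay
 * enabled so the device can connect back.
 */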
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

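/* Queue the appropriate command(s) to tear down a connection based on its
 * current state: disconnect when connected, cancel the attempt when still
 * connecting, or reject an incoming connection request.
 */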
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

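/* Queue a BR/EDR inquiry using either the general or the limited inquiry
 * access code, depending on hdev->discovery.limited. The opt argument is
 * used as the inquiry length.
 */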
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

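/* Kick off discovery according to hdev->discovery.type: BR/EDR inquiry only,
 * LE scan only, or interleaved/simultaneous operation for dual-mode
 * discovery. For the LE cases a delayed work is queued to stop the scan
 * after the chosen timeout.
 */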
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

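/* Queue the commands needed to stop an ongoing discovery (inquiry cancel,
 * LE scan disable and/or remote name request cancel). Returns true if any
 * command was queued on the request.
 */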
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

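/* Initialize the work items and delayed work that drive the deferred
 * updates implemented in this file.
 */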
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}