/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir,
				  timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1278 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1279 if (k
->ediv
!= ediv
||
1280 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	schedule_work(&hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
*hdev
, int type
, void *data
, int count
)
2050 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
2054 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
2058 data
+= (count
- rem
);
2064 EXPORT_SYMBOL(hci_recv_fragment
);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
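
/* Worked example (editor's note, not part of the original source): the
 * ACL header packs the 12-bit connection handle with the 4-bit packet
 * boundary/broadcast flags, i.e. hci_handle_pack() yields
 * (handle | (flags << 12)). For handle 0x002a with ACL_START (0x2) the
 * 16-bit field becomes 0x202a, stored little-endian, followed by the
 * 16-bit payload length.
 */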
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
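
/* Worked example (editor's note, not part of the original source): with
 * three SCO connections holding queued data and hdev->sco_cnt == 6, num
 * ends up 3 and cnt 6, so the least-sent connection is granted a quote of
 * 6 / 3 == 2 frames; a zero quotient is rounded up to 1 so a busy
 * controller still makes progress.
 */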
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
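
/* Worked example (editor's note, not part of the original source): with
 * hdev->block_len == 64, an ACL frame carrying a 300 byte payload (skb->len
 * of 304 including the 4 byte header) occupies DIV_ROUND_UP(300, 64) == 5
 * data blocks of the controller's block-based flow control budget.
 */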
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2833 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2835 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2836 struct hci_conn
*conn
;
2839 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2841 handle
= __le16_to_cpu(hdr
->handle
);
2843 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
2845 hdev
->stat
.sco_rx
++;
2848 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2849 hci_dev_unlock(hdev
);
2852 /* Send to upper protocol */
2853 sco_recv_scodata(conn
, skb
);
2856 BT_ERR("%s SCO packet for unknown connection handle %d",
2857 hdev
->name
, handle
);
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
*hdev
, u8 length
)
2952 /* General inquiry access code (GIAC) */
2953 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2954 struct hci_cp_inquiry cp
;
2956 BT_DBG("%s", hdev
->name
);
2958 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2959 return -EINPROGRESS
;
2961 inquiry_cache_flush(hdev
);
2963 memset(&cp
, 0, sizeof(cp
));
2964 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2967 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}