2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the work handlers defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* List of all registered HCI devices, protected by hci_dev_list_lock. */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Notify HCI sockets about a device event (up/down/register/...). */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev
->name
, cmd
, result
);
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
67 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
) {
68 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
69 u16 opcode
= __le16_to_cpu(sent
->opcode
);
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
79 if (cmd
!= HCI_OP_RESET
|| opcode
== HCI_OP_RESET
)
82 skb
= skb_clone(hdev
->sent_cmd
, GFP_ATOMIC
);
84 skb_queue_head(&hdev
->cmd_q
, skb
);
85 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
91 if (hdev
->req_status
== HCI_REQ_PEND
) {
92 hdev
->req_result
= result
;
93 hdev
->req_status
= HCI_REQ_DONE
;
94 wake_up_interruptible(&hdev
->req_wait_q
);
98 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
100 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
102 if (hdev
->req_status
== HCI_REQ_PEND
) {
103 hdev
->req_result
= err
;
104 hdev
->req_status
= HCI_REQ_CANCELED
;
105 wake_up_interruptible(&hdev
->req_wait_q
);
109 /* Execute request and wait for completion. */
110 static int __hci_request(struct hci_dev
*hdev
,
111 void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
112 unsigned long opt
, __u32 timeout
)
114 DECLARE_WAITQUEUE(wait
, current
);
117 BT_DBG("%s start", hdev
->name
);
119 hdev
->req_status
= HCI_REQ_PEND
;
121 add_wait_queue(&hdev
->req_wait_q
, &wait
);
122 set_current_state(TASK_INTERRUPTIBLE
);
125 schedule_timeout(timeout
);
127 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
129 if (signal_pending(current
))
132 switch (hdev
->req_status
) {
134 err
= -bt_to_errno(hdev
->req_result
);
137 case HCI_REQ_CANCELED
:
138 err
= -hdev
->req_result
;
146 hdev
->req_status
= hdev
->req_result
= 0;
148 BT_DBG("%s end: err %d", hdev
->name
, err
);
153 static int hci_request(struct hci_dev
*hdev
,
154 void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
155 unsigned long opt
, __u32 timeout
)
159 if (!test_bit(HCI_UP
, &hdev
->flags
))
162 /* Serialize all requests */
164 ret
= __hci_request(hdev
, req
, opt
, timeout
);
165 hci_req_unlock(hdev
);
170 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
172 BT_DBG("%s %ld", hdev
->name
, opt
);
175 set_bit(HCI_RESET
, &hdev
->flags
);
176 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
179 static void bredr_init(struct hci_dev
*hdev
)
181 struct hci_cp_delete_stored_link_key cp
;
185 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
187 /* Mandatory initialization */
189 /* Read Local Supported Features */
190 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
192 /* Read Local Version */
193 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
198 /* Read BD Address */
199 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
201 /* Read Class of Device */
202 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
204 /* Read Local Name */
205 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
207 /* Read Voice Setting */
208 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
210 /* Optional initialization */
212 /* Clear Event Filters */
213 flt_type
= HCI_FLT_CLEAR_ALL
;
214 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
216 /* Connection accept timeout ~20 secs */
217 param
= __constant_cpu_to_le16(0x7d00);
218 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
220 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
222 hci_send_cmd(hdev
, HCI_OP_DELETE_STORED_LINK_KEY
, sizeof(cp
), &cp
);
225 static void amp_init(struct hci_dev
*hdev
)
227 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
229 /* Read Local Version */
230 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
235 /* Read Data Blk size */
236 hci_send_cmd(hdev
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
239 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
243 BT_DBG("%s %ld", hdev
->name
, opt
);
245 /* Driver initialization */
247 /* Special commands */
248 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
249 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
250 skb
->dev
= (void *) hdev
;
252 skb_queue_tail(&hdev
->cmd_q
, skb
);
253 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
255 skb_queue_purge(&hdev
->driver_init
);
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
259 hci_reset_req(hdev
, 0);
261 switch (hdev
->dev_type
) {
271 BT_ERR("Unknown device type %d", hdev
->dev_type
);
276 static void hci_le_init_req(struct hci_dev
*hdev
, unsigned long opt
)
278 BT_DBG("%s", hdev
->name
);
280 /* Read LE buffer size */
281 hci_send_cmd(hdev
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
284 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
288 BT_DBG("%s %x", hdev
->name
, scan
);
290 /* Inquiry and Page scans */
291 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
294 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
298 BT_DBG("%s %x", hdev
->name
, auth
);
301 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
304 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
308 BT_DBG("%s %x", hdev
->name
, encrypt
);
311 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
314 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
316 __le16 policy
= cpu_to_le16(opt
);
318 BT_DBG("%s %x", hdev
->name
, policy
);
320 /* Default link policy */
321 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
324 /* Get HCI device by index.
325 * Device is held on return. */
326 struct hci_dev
*hci_dev_get(int index
)
328 struct hci_dev
*hdev
= NULL
, *d
;
335 read_lock(&hci_dev_list_lock
);
336 list_for_each_entry(d
, &hci_dev_list
, list
) {
337 if (d
->id
== index
) {
338 hdev
= hci_dev_hold(d
);
342 read_unlock(&hci_dev_list_lock
);
346 /* ---- Inquiry support ---- */
348 bool hci_discovery_active(struct hci_dev
*hdev
)
350 struct discovery_state
*discov
= &hdev
->discovery
;
352 switch (discov
->state
) {
353 case DISCOVERY_FINDING
:
354 case DISCOVERY_RESOLVING
:
362 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
364 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
366 if (hdev
->discovery
.state
== state
)
370 case DISCOVERY_STOPPED
:
371 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
372 mgmt_discovering(hdev
, 0);
374 case DISCOVERY_STARTING
:
376 case DISCOVERY_FINDING
:
377 mgmt_discovering(hdev
, 1);
379 case DISCOVERY_RESOLVING
:
381 case DISCOVERY_STOPPING
:
385 hdev
->discovery
.state
= state
;
388 static void inquiry_cache_flush(struct hci_dev
*hdev
)
390 struct discovery_state
*cache
= &hdev
->discovery
;
391 struct inquiry_entry
*p
, *n
;
393 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
398 INIT_LIST_HEAD(&cache
->unknown
);
399 INIT_LIST_HEAD(&cache
->resolve
);
402 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
405 struct discovery_state
*cache
= &hdev
->discovery
;
406 struct inquiry_entry
*e
;
408 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
410 list_for_each_entry(e
, &cache
->all
, all
) {
411 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
418 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
421 struct discovery_state
*cache
= &hdev
->discovery
;
422 struct inquiry_entry
*e
;
424 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
426 list_for_each_entry(e
, &cache
->unknown
, list
) {
427 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
434 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
438 struct discovery_state
*cache
= &hdev
->discovery
;
439 struct inquiry_entry
*e
;
441 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
443 list_for_each_entry(e
, &cache
->resolve
, list
) {
444 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
446 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
453 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
454 struct inquiry_entry
*ie
)
456 struct discovery_state
*cache
= &hdev
->discovery
;
457 struct list_head
*pos
= &cache
->resolve
;
458 struct inquiry_entry
*p
;
462 list_for_each_entry(p
, &cache
->resolve
, list
) {
463 if (p
->name_state
!= NAME_PENDING
&&
464 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
469 list_add(&ie
->list
, pos
);
472 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
473 bool name_known
, bool *ssp
)
475 struct discovery_state
*cache
= &hdev
->discovery
;
476 struct inquiry_entry
*ie
;
478 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
481 *ssp
= data
->ssp_mode
;
483 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
485 if (ie
->data
.ssp_mode
&& ssp
)
488 if (ie
->name_state
== NAME_NEEDED
&&
489 data
->rssi
!= ie
->data
.rssi
) {
490 ie
->data
.rssi
= data
->rssi
;
491 hci_inquiry_cache_update_resolve(hdev
, ie
);
497 /* Entry not in the cache. Add new one. */
498 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
502 list_add(&ie
->all
, &cache
->all
);
505 ie
->name_state
= NAME_KNOWN
;
507 ie
->name_state
= NAME_NOT_KNOWN
;
508 list_add(&ie
->list
, &cache
->unknown
);
512 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
513 ie
->name_state
!= NAME_PENDING
) {
514 ie
->name_state
= NAME_KNOWN
;
518 memcpy(&ie
->data
, data
, sizeof(*data
));
519 ie
->timestamp
= jiffies
;
520 cache
->timestamp
= jiffies
;
522 if (ie
->name_state
== NAME_NOT_KNOWN
)
528 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
530 struct discovery_state
*cache
= &hdev
->discovery
;
531 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
532 struct inquiry_entry
*e
;
535 list_for_each_entry(e
, &cache
->all
, all
) {
536 struct inquiry_data
*data
= &e
->data
;
541 bacpy(&info
->bdaddr
, &data
->bdaddr
);
542 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
543 info
->pscan_period_mode
= data
->pscan_period_mode
;
544 info
->pscan_mode
= data
->pscan_mode
;
545 memcpy(info
->dev_class
, data
->dev_class
, 3);
546 info
->clock_offset
= data
->clock_offset
;
552 BT_DBG("cache %p, copied %d", cache
, copied
);
556 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
558 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
559 struct hci_cp_inquiry cp
;
561 BT_DBG("%s", hdev
->name
);
563 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
567 memcpy(&cp
.lap
, &ir
->lap
, 3);
568 cp
.length
= ir
->length
;
569 cp
.num_rsp
= ir
->num_rsp
;
570 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
573 int hci_inquiry(void __user
*arg
)
575 __u8 __user
*ptr
= arg
;
576 struct hci_inquiry_req ir
;
577 struct hci_dev
*hdev
;
578 int err
= 0, do_inquiry
= 0, max_rsp
;
582 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
585 hdev
= hci_dev_get(ir
.dev_id
);
590 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
591 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
592 inquiry_cache_flush(hdev
);
595 hci_dev_unlock(hdev
);
597 timeo
= ir
.length
* msecs_to_jiffies(2000);
600 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
605 /* for unlimited number of responses we will use buffer with
608 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
610 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
611 * copy it to the user space.
613 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
620 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
621 hci_dev_unlock(hdev
);
623 BT_DBG("num_rsp %d", ir
.num_rsp
);
625 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
627 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
640 /* ---- HCI ioctl helpers ---- */
642 int hci_dev_open(__u16 dev
)
644 struct hci_dev
*hdev
;
647 hdev
= hci_dev_get(dev
);
651 BT_DBG("%s %p", hdev
->name
, hdev
);
655 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
660 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
665 if (test_bit(HCI_UP
, &hdev
->flags
)) {
670 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
671 set_bit(HCI_RAW
, &hdev
->flags
);
673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
676 set_bit(HCI_RAW
, &hdev
->flags
);
678 if (hdev
->open(hdev
)) {
683 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
684 atomic_set(&hdev
->cmd_cnt
, 1);
685 set_bit(HCI_INIT
, &hdev
->flags
);
686 hdev
->init_last_cmd
= 0;
688 ret
= __hci_request(hdev
, hci_init_req
, 0, HCI_INIT_TIMEOUT
);
690 if (lmp_host_le_capable(hdev
))
691 ret
= __hci_request(hdev
, hci_le_init_req
, 0,
694 clear_bit(HCI_INIT
, &hdev
->flags
);
699 set_bit(HCI_UP
, &hdev
->flags
);
700 hci_notify(hdev
, HCI_DEV_UP
);
701 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
702 mgmt_valid_hdev(hdev
)) {
704 mgmt_powered(hdev
, 1);
705 hci_dev_unlock(hdev
);
708 /* Init failed, cleanup */
709 flush_work(&hdev
->tx_work
);
710 flush_work(&hdev
->cmd_work
);
711 flush_work(&hdev
->rx_work
);
713 skb_queue_purge(&hdev
->cmd_q
);
714 skb_queue_purge(&hdev
->rx_q
);
719 if (hdev
->sent_cmd
) {
720 kfree_skb(hdev
->sent_cmd
);
721 hdev
->sent_cmd
= NULL
;
729 hci_req_unlock(hdev
);
734 static int hci_dev_do_close(struct hci_dev
*hdev
)
736 BT_DBG("%s %p", hdev
->name
, hdev
);
738 cancel_work_sync(&hdev
->le_scan
);
740 cancel_delayed_work(&hdev
->power_off
);
742 hci_req_cancel(hdev
, ENODEV
);
745 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
746 del_timer_sync(&hdev
->cmd_timer
);
747 hci_req_unlock(hdev
);
751 /* Flush RX and TX works */
752 flush_work(&hdev
->tx_work
);
753 flush_work(&hdev
->rx_work
);
755 if (hdev
->discov_timeout
> 0) {
756 cancel_delayed_work(&hdev
->discov_off
);
757 hdev
->discov_timeout
= 0;
758 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
761 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
762 cancel_delayed_work(&hdev
->service_cache
);
764 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
767 inquiry_cache_flush(hdev
);
768 hci_conn_hash_flush(hdev
);
769 hci_dev_unlock(hdev
);
771 hci_notify(hdev
, HCI_DEV_DOWN
);
777 skb_queue_purge(&hdev
->cmd_q
);
778 atomic_set(&hdev
->cmd_cnt
, 1);
779 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
780 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
781 set_bit(HCI_INIT
, &hdev
->flags
);
782 __hci_request(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
783 clear_bit(HCI_INIT
, &hdev
->flags
);
787 flush_work(&hdev
->cmd_work
);
790 skb_queue_purge(&hdev
->rx_q
);
791 skb_queue_purge(&hdev
->cmd_q
);
792 skb_queue_purge(&hdev
->raw_q
);
794 /* Drop last sent command */
795 if (hdev
->sent_cmd
) {
796 del_timer_sync(&hdev
->cmd_timer
);
797 kfree_skb(hdev
->sent_cmd
);
798 hdev
->sent_cmd
= NULL
;
801 /* After this point our queues are empty
802 * and no tasks are scheduled. */
805 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
806 mgmt_valid_hdev(hdev
)) {
808 mgmt_powered(hdev
, 0);
809 hci_dev_unlock(hdev
);
815 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
816 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
818 hci_req_unlock(hdev
);
824 int hci_dev_close(__u16 dev
)
826 struct hci_dev
*hdev
;
829 hdev
= hci_dev_get(dev
);
833 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
834 cancel_delayed_work(&hdev
->power_off
);
836 err
= hci_dev_do_close(hdev
);
842 int hci_dev_reset(__u16 dev
)
844 struct hci_dev
*hdev
;
847 hdev
= hci_dev_get(dev
);
853 if (!test_bit(HCI_UP
, &hdev
->flags
))
857 skb_queue_purge(&hdev
->rx_q
);
858 skb_queue_purge(&hdev
->cmd_q
);
861 inquiry_cache_flush(hdev
);
862 hci_conn_hash_flush(hdev
);
863 hci_dev_unlock(hdev
);
868 atomic_set(&hdev
->cmd_cnt
, 1);
869 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
871 if (!test_bit(HCI_RAW
, &hdev
->flags
))
872 ret
= __hci_request(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
875 hci_req_unlock(hdev
);
880 int hci_dev_reset_stat(__u16 dev
)
882 struct hci_dev
*hdev
;
885 hdev
= hci_dev_get(dev
);
889 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
896 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
898 struct hci_dev
*hdev
;
899 struct hci_dev_req dr
;
902 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
905 hdev
= hci_dev_get(dr
.dev_id
);
911 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
916 if (!lmp_encrypt_capable(hdev
)) {
921 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
922 /* Auth must be enabled first */
923 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
929 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
934 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
939 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
944 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
945 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
949 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
953 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
954 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
958 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
959 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
971 int hci_get_dev_list(void __user
*arg
)
973 struct hci_dev
*hdev
;
974 struct hci_dev_list_req
*dl
;
975 struct hci_dev_req
*dr
;
976 int n
= 0, size
, err
;
979 if (get_user(dev_num
, (__u16 __user
*) arg
))
982 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
985 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
987 dl
= kzalloc(size
, GFP_KERNEL
);
993 read_lock(&hci_dev_list_lock
);
994 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
995 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
996 cancel_delayed_work(&hdev
->power_off
);
998 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
999 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1001 (dr
+ n
)->dev_id
= hdev
->id
;
1002 (dr
+ n
)->dev_opt
= hdev
->flags
;
1007 read_unlock(&hci_dev_list_lock
);
1010 size
= sizeof(*dl
) + n
* sizeof(*dr
);
1012 err
= copy_to_user(arg
, dl
, size
);
1015 return err
? -EFAULT
: 0;
1018 int hci_get_dev_info(void __user
*arg
)
1020 struct hci_dev
*hdev
;
1021 struct hci_dev_info di
;
1024 if (copy_from_user(&di
, arg
, sizeof(di
)))
1027 hdev
= hci_dev_get(di
.dev_id
);
1031 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1032 cancel_delayed_work_sync(&hdev
->power_off
);
1034 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1035 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1037 strcpy(di
.name
, hdev
->name
);
1038 di
.bdaddr
= hdev
->bdaddr
;
1039 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
1040 di
.flags
= hdev
->flags
;
1041 di
.pkt_type
= hdev
->pkt_type
;
1042 di
.acl_mtu
= hdev
->acl_mtu
;
1043 di
.acl_pkts
= hdev
->acl_pkts
;
1044 di
.sco_mtu
= hdev
->sco_mtu
;
1045 di
.sco_pkts
= hdev
->sco_pkts
;
1046 di
.link_policy
= hdev
->link_policy
;
1047 di
.link_mode
= hdev
->link_mode
;
1049 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1050 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1052 if (copy_to_user(arg
, &di
, sizeof(di
)))
1060 /* ---- Interface to HCI drivers ---- */
1062 static int hci_rfkill_set_block(void *data
, bool blocked
)
1064 struct hci_dev
*hdev
= data
;
1066 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1071 hci_dev_do_close(hdev
);
1076 static const struct rfkill_ops hci_rfkill_ops
= {
1077 .set_block
= hci_rfkill_set_block
,
1080 static void hci_power_on(struct work_struct
*work
)
1082 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1084 BT_DBG("%s", hdev
->name
);
1086 if (hci_dev_open(hdev
->id
) < 0)
1089 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1090 schedule_delayed_work(&hdev
->power_off
, HCI_AUTO_OFF_TIMEOUT
);
1092 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1093 mgmt_index_added(hdev
);
1096 static void hci_power_off(struct work_struct
*work
)
1098 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1101 BT_DBG("%s", hdev
->name
);
1103 hci_dev_do_close(hdev
);
1106 static void hci_discov_off(struct work_struct
*work
)
1108 struct hci_dev
*hdev
;
1109 u8 scan
= SCAN_PAGE
;
1111 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1113 BT_DBG("%s", hdev
->name
);
1117 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1119 hdev
->discov_timeout
= 0;
1121 hci_dev_unlock(hdev
);
1124 int hci_uuids_clear(struct hci_dev
*hdev
)
1126 struct list_head
*p
, *n
;
1128 list_for_each_safe(p
, n
, &hdev
->uuids
) {
1129 struct bt_uuid
*uuid
;
1131 uuid
= list_entry(p
, struct bt_uuid
, list
);
1140 int hci_link_keys_clear(struct hci_dev
*hdev
)
1142 struct list_head
*p
, *n
;
1144 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1145 struct link_key
*key
;
1147 key
= list_entry(p
, struct link_key
, list
);
1156 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1158 struct smp_ltk
*k
, *tmp
;
1160 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1168 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1172 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1173 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1179 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1180 u8 key_type
, u8 old_key_type
)
1183 if (key_type
< 0x03)
1186 /* Debug keys are insecure so don't store them persistently */
1187 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1190 /* Changed combination key and there's no previous one */
1191 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1194 /* Security mode 3 case */
1198 /* Neither local nor remote side had no-bonding as requirement */
1199 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1202 /* Local side had dedicated bonding as requirement */
1203 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1206 /* Remote side had dedicated bonding as requirement */
1207 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1210 /* If none of the above criteria match, then don't store the key
1215 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1219 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1220 if (k
->ediv
!= ediv
||
1221 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1230 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1235 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1236 if (addr_type
== k
->bdaddr_type
&&
1237 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1243 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1244 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1246 struct link_key
*key
, *old_key
;
1250 old_key
= hci_find_link_key(hdev
, bdaddr
);
1252 old_key_type
= old_key
->type
;
1255 old_key_type
= conn
? conn
->key_type
: 0xff;
1256 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1259 list_add(&key
->list
, &hdev
->link_keys
);
1262 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1264 /* Some buggy controller combinations generate a changed
1265 * combination key for legacy pairing even when there's no
1267 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1268 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1269 type
= HCI_LK_COMBINATION
;
1271 conn
->key_type
= type
;
1274 bacpy(&key
->bdaddr
, bdaddr
);
1275 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1276 key
->pin_len
= pin_len
;
1278 if (type
== HCI_LK_CHANGED_COMBINATION
)
1279 key
->type
= old_key_type
;
1286 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1288 mgmt_new_link_key(hdev
, key
, persistent
);
1291 conn
->flush_key
= !persistent
;
1296 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1297 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
1300 struct smp_ltk
*key
, *old_key
;
1302 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1305 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1309 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1312 list_add(&key
->list
, &hdev
->long_term_keys
);
1315 bacpy(&key
->bdaddr
, bdaddr
);
1316 key
->bdaddr_type
= addr_type
;
1317 memcpy(key
->val
, tk
, sizeof(key
->val
));
1318 key
->authenticated
= authenticated
;
1320 key
->enc_size
= enc_size
;
1322 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1327 if (type
& HCI_SMP_LTK
)
1328 mgmt_new_ltk(hdev
, key
, 1);
1333 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1335 struct link_key
*key
;
1337 key
= hci_find_link_key(hdev
, bdaddr
);
1341 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1343 list_del(&key
->list
);
1349 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1351 struct smp_ltk
*k
, *tmp
;
1353 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1354 if (bacmp(bdaddr
, &k
->bdaddr
))
1357 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1366 /* HCI command timer function */
1367 static void hci_cmd_timeout(unsigned long arg
)
1369 struct hci_dev
*hdev
= (void *) arg
;
1371 if (hdev
->sent_cmd
) {
1372 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
1373 u16 opcode
= __le16_to_cpu(sent
->opcode
);
1375 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
1377 BT_ERR("%s command tx timeout", hdev
->name
);
1380 atomic_set(&hdev
->cmd_cnt
, 1);
1381 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1384 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1387 struct oob_data
*data
;
1389 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1390 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1396 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1398 struct oob_data
*data
;
1400 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1404 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1406 list_del(&data
->list
);
1412 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1414 struct oob_data
*data
, *n
;
1416 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1417 list_del(&data
->list
);
1424 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1427 struct oob_data
*data
;
1429 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1432 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1436 bacpy(&data
->bdaddr
, bdaddr
);
1437 list_add(&data
->list
, &hdev
->remote_oob_data
);
1440 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1441 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1443 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
1448 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1450 struct bdaddr_list
*b
;
1452 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1453 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1459 int hci_blacklist_clear(struct hci_dev
*hdev
)
1461 struct list_head
*p
, *n
;
1463 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1464 struct bdaddr_list
*b
;
1466 b
= list_entry(p
, struct bdaddr_list
, list
);
1475 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1477 struct bdaddr_list
*entry
;
1479 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1482 if (hci_blacklist_lookup(hdev
, bdaddr
))
1485 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1489 bacpy(&entry
->bdaddr
, bdaddr
);
1491 list_add(&entry
->list
, &hdev
->blacklist
);
1493 return mgmt_device_blocked(hdev
, bdaddr
, type
);
1496 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
1498 struct bdaddr_list
*entry
;
1500 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1501 return hci_blacklist_clear(hdev
);
1503 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1507 list_del(&entry
->list
);
1510 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
1513 static void le_scan_param_req(struct hci_dev
*hdev
, unsigned long opt
)
1515 struct le_scan_params
*param
= (struct le_scan_params
*) opt
;
1516 struct hci_cp_le_set_scan_param cp
;
1518 memset(&cp
, 0, sizeof(cp
));
1519 cp
.type
= param
->type
;
1520 cp
.interval
= cpu_to_le16(param
->interval
);
1521 cp
.window
= cpu_to_le16(param
->window
);
1523 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(cp
), &cp
);
1526 static void le_scan_enable_req(struct hci_dev
*hdev
, unsigned long opt
)
1528 struct hci_cp_le_set_scan_enable cp
;
1530 memset(&cp
, 0, sizeof(cp
));
1534 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1537 static int hci_do_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
,
1538 u16 window
, int timeout
)
1540 long timeo
= msecs_to_jiffies(3000);
1541 struct le_scan_params param
;
1544 BT_DBG("%s", hdev
->name
);
1546 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1547 return -EINPROGRESS
;
1550 param
.interval
= interval
;
1551 param
.window
= window
;
1555 err
= __hci_request(hdev
, le_scan_param_req
, (unsigned long) ¶m
,
1558 err
= __hci_request(hdev
, le_scan_enable_req
, 0, timeo
);
1560 hci_req_unlock(hdev
);
1565 schedule_delayed_work(&hdev
->le_scan_disable
,
1566 msecs_to_jiffies(timeout
));
1571 int hci_cancel_le_scan(struct hci_dev
*hdev
)
1573 BT_DBG("%s", hdev
->name
);
1575 if (!test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1578 if (cancel_delayed_work(&hdev
->le_scan_disable
)) {
1579 struct hci_cp_le_set_scan_enable cp
;
1581 /* Send HCI command to disable LE Scan */
1582 memset(&cp
, 0, sizeof(cp
));
1583 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1589 static void le_scan_disable_work(struct work_struct
*work
)
1591 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1592 le_scan_disable
.work
);
1593 struct hci_cp_le_set_scan_enable cp
;
1595 BT_DBG("%s", hdev
->name
);
1597 memset(&cp
, 0, sizeof(cp
));
1599 hci_send_cmd(hdev
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
1602 static void le_scan_work(struct work_struct
*work
)
1604 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, le_scan
);
1605 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1607 BT_DBG("%s", hdev
->name
);
1609 hci_do_le_scan(hdev
, param
->type
, param
->interval
, param
->window
,
1613 int hci_le_scan(struct hci_dev
*hdev
, u8 type
, u16 interval
, u16 window
,
1616 struct le_scan_params
*param
= &hdev
->le_scan_params
;
1618 BT_DBG("%s", hdev
->name
);
1620 if (work_busy(&hdev
->le_scan
))
1621 return -EINPROGRESS
;
1624 param
->interval
= interval
;
1625 param
->window
= window
;
1626 param
->timeout
= timeout
;
1628 queue_work(system_long_wq
, &hdev
->le_scan
);
1633 /* Alloc HCI device */
1634 struct hci_dev
*hci_alloc_dev(void)
1636 struct hci_dev
*hdev
;
1638 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1642 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1643 hdev
->esco_type
= (ESCO_HV1
);
1644 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1645 hdev
->io_capability
= 0x03; /* No Input No Output */
1647 hdev
->sniff_max_interval
= 800;
1648 hdev
->sniff_min_interval
= 80;
1650 mutex_init(&hdev
->lock
);
1651 mutex_init(&hdev
->req_lock
);
1653 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1654 INIT_LIST_HEAD(&hdev
->blacklist
);
1655 INIT_LIST_HEAD(&hdev
->uuids
);
1656 INIT_LIST_HEAD(&hdev
->link_keys
);
1657 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1658 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1659 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
1661 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1662 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1663 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1664 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1665 INIT_WORK(&hdev
->le_scan
, le_scan_work
);
1667 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
1668 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
1669 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
1671 skb_queue_head_init(&hdev
->driver_init
);
1672 skb_queue_head_init(&hdev
->rx_q
);
1673 skb_queue_head_init(&hdev
->cmd_q
);
1674 skb_queue_head_init(&hdev
->raw_q
);
1676 init_waitqueue_head(&hdev
->req_wait_q
);
1678 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
1680 hci_init_sysfs(hdev
);
1681 discovery_init(hdev
);
1685 EXPORT_SYMBOL(hci_alloc_dev
);
1687 /* Free HCI device */
1688 void hci_free_dev(struct hci_dev
*hdev
)
1690 skb_queue_purge(&hdev
->driver_init
);
1692 /* will free via device release */
1693 put_device(&hdev
->dev
);
1695 EXPORT_SYMBOL(hci_free_dev
);
1697 /* Register HCI device */
1698 int hci_register_dev(struct hci_dev
*hdev
)
1702 if (!hdev
->open
|| !hdev
->close
)
1705 /* Do not allow HCI_AMP devices to register at index 0,
1706 * so the index can be used as the AMP controller ID.
1708 switch (hdev
->dev_type
) {
1710 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
1713 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
1722 sprintf(hdev
->name
, "hci%d", id
);
1725 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1727 write_lock(&hci_dev_list_lock
);
1728 list_add(&hdev
->list
, &hci_dev_list
);
1729 write_unlock(&hci_dev_list_lock
);
1731 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
1733 if (!hdev
->workqueue
) {
1738 error
= hci_add_sysfs(hdev
);
1742 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1743 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
1746 if (rfkill_register(hdev
->rfkill
) < 0) {
1747 rfkill_destroy(hdev
->rfkill
);
1748 hdev
->rfkill
= NULL
;
1752 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
1754 if (hdev
->dev_type
!= HCI_AMP
)
1755 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1757 hci_notify(hdev
, HCI_DEV_REG
);
1760 schedule_work(&hdev
->power_on
);
1765 destroy_workqueue(hdev
->workqueue
);
1767 ida_simple_remove(&hci_index_ida
, hdev
->id
);
1768 write_lock(&hci_dev_list_lock
);
1769 list_del(&hdev
->list
);
1770 write_unlock(&hci_dev_list_lock
);
1774 EXPORT_SYMBOL(hci_register_dev
);
1776 /* Unregister HCI device */
1777 void hci_unregister_dev(struct hci_dev
*hdev
)
1781 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1783 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
1787 write_lock(&hci_dev_list_lock
);
1788 list_del(&hdev
->list
);
1789 write_unlock(&hci_dev_list_lock
);
1791 hci_dev_do_close(hdev
);
1793 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1794 kfree_skb(hdev
->reassembly
[i
]);
1796 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1797 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1799 mgmt_index_removed(hdev
);
1800 hci_dev_unlock(hdev
);
1803 /* mgmt_index_removed should take care of emptying the
1805 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
1807 hci_notify(hdev
, HCI_DEV_UNREG
);
1810 rfkill_unregister(hdev
->rfkill
);
1811 rfkill_destroy(hdev
->rfkill
);
1814 hci_del_sysfs(hdev
);
1816 destroy_workqueue(hdev
->workqueue
);
1819 hci_blacklist_clear(hdev
);
1820 hci_uuids_clear(hdev
);
1821 hci_link_keys_clear(hdev
);
1822 hci_smp_ltks_clear(hdev
);
1823 hci_remote_oob_data_clear(hdev
);
1824 hci_dev_unlock(hdev
);
1828 ida_simple_remove(&hci_index_ida
, id
);
1830 EXPORT_SYMBOL(hci_unregister_dev
);
1832 /* Suspend HCI device */
1833 int hci_suspend_dev(struct hci_dev
*hdev
)
1835 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1838 EXPORT_SYMBOL(hci_suspend_dev
);
1840 /* Resume HCI device */
1841 int hci_resume_dev(struct hci_dev
*hdev
)
1843 hci_notify(hdev
, HCI_DEV_RESUME
);
1846 EXPORT_SYMBOL(hci_resume_dev
);
1848 /* Receive frame from HCI drivers */
1849 int hci_recv_frame(struct sk_buff
*skb
)
1851 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1852 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1853 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1859 bt_cb(skb
)->incoming
= 1;
1862 __net_timestamp(skb
);
1864 skb_queue_tail(&hdev
->rx_q
, skb
);
1865 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
1869 EXPORT_SYMBOL(hci_recv_frame
);
1871 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1872 int count
, __u8 index
)
1877 struct sk_buff
*skb
;
1878 struct bt_skb_cb
*scb
;
1880 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1881 index
>= NUM_REASSEMBLY
)
1884 skb
= hdev
->reassembly
[index
];
1888 case HCI_ACLDATA_PKT
:
1889 len
= HCI_MAX_FRAME_SIZE
;
1890 hlen
= HCI_ACL_HDR_SIZE
;
1893 len
= HCI_MAX_EVENT_SIZE
;
1894 hlen
= HCI_EVENT_HDR_SIZE
;
1896 case HCI_SCODATA_PKT
:
1897 len
= HCI_MAX_SCO_SIZE
;
1898 hlen
= HCI_SCO_HDR_SIZE
;
1902 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1906 scb
= (void *) skb
->cb
;
1908 scb
->pkt_type
= type
;
1910 skb
->dev
= (void *) hdev
;
1911 hdev
->reassembly
[index
] = skb
;
1915 scb
= (void *) skb
->cb
;
1916 len
= min_t(uint
, scb
->expect
, count
);
1918 memcpy(skb_put(skb
, len
), data
, len
);
1927 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1928 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1929 scb
->expect
= h
->plen
;
1931 if (skb_tailroom(skb
) < scb
->expect
) {
1933 hdev
->reassembly
[index
] = NULL
;
1939 case HCI_ACLDATA_PKT
:
1940 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1941 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1942 scb
->expect
= __le16_to_cpu(h
->dlen
);
1944 if (skb_tailroom(skb
) < scb
->expect
) {
1946 hdev
->reassembly
[index
] = NULL
;
1952 case HCI_SCODATA_PKT
:
1953 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1954 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1955 scb
->expect
= h
->dlen
;
1957 if (skb_tailroom(skb
) < scb
->expect
) {
1959 hdev
->reassembly
[index
] = NULL
;
1966 if (scb
->expect
== 0) {
1967 /* Complete frame */
1969 bt_cb(skb
)->pkt_type
= type
;
1970 hci_recv_frame(skb
);
1972 hdev
->reassembly
[index
] = NULL
;
1980 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1984 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1988 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
1992 data
+= (count
- rem
);
1998 EXPORT_SYMBOL(hci_recv_fragment
);
2000 #define STREAM_REASSEMBLY 0
2002 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
2008 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
2011 struct { char type
; } *pkt
;
2013 /* Start of the frame */
2020 type
= bt_cb(skb
)->pkt_type
;
2022 rem
= hci_reassembly(hdev
, type
, data
, count
,
2027 data
+= (count
- rem
);
2033 EXPORT_SYMBOL(hci_recv_stream_fragment
);
2035 /* ---- Interface to upper protocols ---- */
2037 int hci_register_cb(struct hci_cb
*cb
)
2039 BT_DBG("%p name %s", cb
, cb
->name
);
2041 write_lock(&hci_cb_list_lock
);
2042 list_add(&cb
->list
, &hci_cb_list
);
2043 write_unlock(&hci_cb_list_lock
);
2047 EXPORT_SYMBOL(hci_register_cb
);
2049 int hci_unregister_cb(struct hci_cb
*cb
)
2051 BT_DBG("%p name %s", cb
, cb
->name
);
2053 write_lock(&hci_cb_list_lock
);
2054 list_del(&cb
->list
);
2055 write_unlock(&hci_cb_list_lock
);
2059 EXPORT_SYMBOL(hci_unregister_cb
);
2061 static int hci_send_frame(struct sk_buff
*skb
)
2063 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2070 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2073 __net_timestamp(skb
);
2075 /* Send copy to monitor */
2076 hci_send_to_monitor(hdev
, skb
);
2078 if (atomic_read(&hdev
->promisc
)) {
2079 /* Send copy to the sockets */
2080 hci_send_to_sock(hdev
, skb
);
2083 /* Get rid of skb owner, prior to sending to the driver. */
2086 return hdev
->send(skb
);
2089 /* Send HCI command */
2090 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2092 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2093 struct hci_command_hdr
*hdr
;
2094 struct sk_buff
*skb
;
2096 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2098 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2100 BT_ERR("%s no memory for command", hdev
->name
);
2104 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2105 hdr
->opcode
= cpu_to_le16(opcode
);
2109 memcpy(skb_put(skb
, plen
), param
, plen
);
2111 BT_DBG("skb len %d", skb
->len
);
2113 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2114 skb
->dev
= (void *) hdev
;
2116 if (test_bit(HCI_INIT
, &hdev
->flags
))
2117 hdev
->init_last_cmd
= opcode
;
2119 skb_queue_tail(&hdev
->cmd_q
, skb
);
2120 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2125 /* Get data from the previously sent command */
2126 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2128 struct hci_command_hdr
*hdr
;
2130 if (!hdev
->sent_cmd
)
2133 hdr
= (void *) hdev
->sent_cmd
->data
;
2135 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2138 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
2140 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
2144 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2146 struct hci_acl_hdr
*hdr
;
2149 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2150 skb_reset_transport_header(skb
);
2151 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2152 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2153 hdr
->dlen
= cpu_to_le16(len
);
2156 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
2157 struct sk_buff
*skb
, __u16 flags
)
2159 struct hci_conn
*conn
= chan
->conn
;
2160 struct hci_dev
*hdev
= conn
->hdev
;
2161 struct sk_buff
*list
;
2163 skb
->len
= skb_headlen(skb
);
2166 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2168 switch (hdev
->dev_type
) {
2170 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2173 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
2176 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
2180 list
= skb_shinfo(skb
)->frag_list
;
2182 /* Non fragmented */
2183 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2185 skb_queue_tail(queue
, skb
);
2188 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2190 skb_shinfo(skb
)->frag_list
= NULL
;
2192 /* Queue all fragments atomically */
2193 spin_lock(&queue
->lock
);
2195 __skb_queue_tail(queue
, skb
);
2197 flags
&= ~ACL_START
;
2200 skb
= list
; list
= list
->next
;
2202 skb
->dev
= (void *) hdev
;
2203 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2204 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2206 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2208 __skb_queue_tail(queue
, skb
);
2211 spin_unlock(&queue
->lock
);
2215 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2217 struct hci_dev
*hdev
= chan
->conn
->hdev
;
2219 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
2221 skb
->dev
= (void *) hdev
;
2223 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
2225 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2229 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2231 struct hci_dev
*hdev
= conn
->hdev
;
2232 struct hci_sco_hdr hdr
;
2234 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2236 hdr
.handle
= cpu_to_le16(conn
->handle
);
2237 hdr
.dlen
= skb
->len
;
2239 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2240 skb_reset_transport_header(skb
);
2241 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2243 skb
->dev
= (void *) hdev
;
2244 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2246 skb_queue_tail(&conn
->data_q
, skb
);
2247 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2250 /* ---- HCI TX task (outgoing data) ---- */
2252 /* HCI Connection scheduler */
2253 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
2256 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2257 struct hci_conn
*conn
= NULL
, *c
;
2258 unsigned int num
= 0, min
= ~0;
2260 /* We don't have to lock device here. Connections are always
2261 * added and removed with TX task disabled. */
2265 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2266 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2269 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2274 if (c
->sent
< min
) {
2279 if (hci_conn_num(hdev
, type
) == num
)
2288 switch (conn
->type
) {
2290 cnt
= hdev
->acl_cnt
;
2294 cnt
= hdev
->sco_cnt
;
2297 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2301 BT_ERR("Unknown link type");
2309 BT_DBG("conn %p quote %d", conn
, *quote
);
2313 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2315 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2318 BT_ERR("%s link tx timeout", hdev
->name
);
2322 /* Kill stalled connections */
2323 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2324 if (c
->type
== type
&& c
->sent
) {
2325 BT_ERR("%s killing stalled connection %pMR",
2326 hdev
->name
, &c
->dst
);
2327 hci_acl_disconn(c
, HCI_ERROR_REMOTE_USER_TERM
);
2334 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2337 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2338 struct hci_chan
*chan
= NULL
;
2339 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
2340 struct hci_conn
*conn
;
2341 int cnt
, q
, conn_num
= 0;
2343 BT_DBG("%s", hdev
->name
);
2347 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2348 struct hci_chan
*tmp
;
2350 if (conn
->type
!= type
)
2353 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2358 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2359 struct sk_buff
*skb
;
2361 if (skb_queue_empty(&tmp
->data_q
))
2364 skb
= skb_peek(&tmp
->data_q
);
2365 if (skb
->priority
< cur_prio
)
2368 if (skb
->priority
> cur_prio
) {
2371 cur_prio
= skb
->priority
;
2376 if (conn
->sent
< min
) {
2382 if (hci_conn_num(hdev
, type
) == conn_num
)
2391 switch (chan
->conn
->type
) {
2393 cnt
= hdev
->acl_cnt
;
2396 cnt
= hdev
->block_cnt
;
2400 cnt
= hdev
->sco_cnt
;
2403 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2407 BT_ERR("Unknown link type");
2412 BT_DBG("chan %p quote %d", chan
, *quote
);
2416 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2418 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2419 struct hci_conn
*conn
;
2422 BT_DBG("%s", hdev
->name
);
2426 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2427 struct hci_chan
*chan
;
2429 if (conn
->type
!= type
)
2432 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2437 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2438 struct sk_buff
*skb
;
2445 if (skb_queue_empty(&chan
->data_q
))
2448 skb
= skb_peek(&chan
->data_q
);
2449 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2452 skb
->priority
= HCI_PRIO_MAX
- 1;
2454 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2458 if (hci_conn_num(hdev
, type
) == num
)
2466 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2468 /* Calculate count of blocks used by this packet */
2469 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
2472 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2474 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2475 /* ACL tx timeout must be longer than maximum
2476 * link supervision timeout (40.9 seconds) */
2477 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2478 HCI_ACL_TX_TIMEOUT
))
2479 hci_link_tx_to(hdev
, ACL_LINK
);
2483 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2485 unsigned int cnt
= hdev
->acl_cnt
;
2486 struct hci_chan
*chan
;
2487 struct sk_buff
*skb
;
2490 __check_timeout(hdev
, cnt
);
2492 while (hdev
->acl_cnt
&&
2493 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2494 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2495 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2496 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2497 skb
->len
, skb
->priority
);
2499 /* Stop if priority has changed */
2500 if (skb
->priority
< priority
)
2503 skb
= skb_dequeue(&chan
->data_q
);
2505 hci_conn_enter_active_mode(chan
->conn
,
2506 bt_cb(skb
)->force_active
);
2508 hci_send_frame(skb
);
2509 hdev
->acl_last_tx
= jiffies
;
2517 if (cnt
!= hdev
->acl_cnt
)
2518 hci_prio_recalculate(hdev
, ACL_LINK
);
2521 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
2523 unsigned int cnt
= hdev
->block_cnt
;
2524 struct hci_chan
*chan
;
2525 struct sk_buff
*skb
;
2529 __check_timeout(hdev
, cnt
);
2531 BT_DBG("%s", hdev
->name
);
2533 if (hdev
->dev_type
== HCI_AMP
)
2538 while (hdev
->block_cnt
> 0 &&
2539 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
2540 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2541 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2544 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2545 skb
->len
, skb
->priority
);
2547 /* Stop if priority has changed */
2548 if (skb
->priority
< priority
)
2551 skb
= skb_dequeue(&chan
->data_q
);
2553 blocks
= __get_blocks(hdev
, skb
);
2554 if (blocks
> hdev
->block_cnt
)
2557 hci_conn_enter_active_mode(chan
->conn
,
2558 bt_cb(skb
)->force_active
);
2560 hci_send_frame(skb
);
2561 hdev
->acl_last_tx
= jiffies
;
2563 hdev
->block_cnt
-= blocks
;
2566 chan
->sent
+= blocks
;
2567 chan
->conn
->sent
+= blocks
;
2571 if (cnt
!= hdev
->block_cnt
)
2572 hci_prio_recalculate(hdev
, type
);
2575 static void hci_sched_acl(struct hci_dev
*hdev
)
2577 BT_DBG("%s", hdev
->name
);
2579 /* No ACL link over BR/EDR controller */
2580 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
2583 /* No AMP link over AMP controller */
2584 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
2587 switch (hdev
->flow_ctl_mode
) {
2588 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
2589 hci_sched_acl_pkt(hdev
);
2592 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
2593 hci_sched_acl_blk(hdev
);
2599 static void hci_sched_sco(struct hci_dev
*hdev
)
2601 struct hci_conn
*conn
;
2602 struct sk_buff
*skb
;
2605 BT_DBG("%s", hdev
->name
);
2607 if (!hci_conn_num(hdev
, SCO_LINK
))
2610 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
2611 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2612 BT_DBG("skb %p len %d", skb
, skb
->len
);
2613 hci_send_frame(skb
);
2616 if (conn
->sent
== ~0)
2622 static void hci_sched_esco(struct hci_dev
*hdev
)
2624 struct hci_conn
*conn
;
2625 struct sk_buff
*skb
;
2628 BT_DBG("%s", hdev
->name
);
2630 if (!hci_conn_num(hdev
, ESCO_LINK
))
2633 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
2635 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2636 BT_DBG("skb %p len %d", skb
, skb
->len
);
2637 hci_send_frame(skb
);
2640 if (conn
->sent
== ~0)
2646 static void hci_sched_le(struct hci_dev
*hdev
)
2648 struct hci_chan
*chan
;
2649 struct sk_buff
*skb
;
2650 int quote
, cnt
, tmp
;
2652 BT_DBG("%s", hdev
->name
);
2654 if (!hci_conn_num(hdev
, LE_LINK
))
2657 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2658 /* LE tx timeout must be longer than maximum
2659 * link supervision timeout (40.9 seconds) */
2660 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
2661 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
2662 hci_link_tx_to(hdev
, LE_LINK
);
2665 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
2667 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
2668 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2669 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2670 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2671 skb
->len
, skb
->priority
);
2673 /* Stop if priority has changed */
2674 if (skb
->priority
< priority
)
2677 skb
= skb_dequeue(&chan
->data_q
);
2679 hci_send_frame(skb
);
2680 hdev
->le_last_tx
= jiffies
;
2691 hdev
->acl_cnt
= cnt
;
2694 hci_prio_recalculate(hdev
, LE_LINK
);
2697 static void hci_tx_work(struct work_struct
*work
)
2699 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
2700 struct sk_buff
*skb
;
2702 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
2703 hdev
->sco_cnt
, hdev
->le_cnt
);
2705 /* Schedule queues and send stuff to HCI driver */
2707 hci_sched_acl(hdev
);
2709 hci_sched_sco(hdev
);
2711 hci_sched_esco(hdev
);
2715 /* Send next queued raw (unknown type) packet */
2716 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
2717 hci_send_frame(skb
);
2720 /* ----- HCI RX task (incoming data processing) ----- */
2722 /* ACL data packet */
2723 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2725 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
2726 struct hci_conn
*conn
;
2727 __u16 handle
, flags
;
2729 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
2731 handle
= __le16_to_cpu(hdr
->handle
);
2732 flags
= hci_flags(handle
);
2733 handle
= hci_handle(handle
);
2735 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
2738 hdev
->stat
.acl_rx
++;
2741 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2742 hci_dev_unlock(hdev
);
2745 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
2748 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
2749 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
))
2750 mgmt_device_connected(hdev
, &conn
->dst
, conn
->type
,
2751 conn
->dst_type
, 0, NULL
, 0,
2753 hci_dev_unlock(hdev
);
2755 /* Send to upper protocol */
2756 l2cap_recv_acldata(conn
, skb
, flags
);
2759 BT_ERR("%s ACL packet for unknown connection handle %d",
2760 hdev
->name
, handle
);
2766 /* SCO data packet */
2767 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2769 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2770 struct hci_conn
*conn
;
2773 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2775 handle
= __le16_to_cpu(hdr
->handle
);
2777 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
2779 hdev
->stat
.sco_rx
++;
2782 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2783 hci_dev_unlock(hdev
);
2786 /* Send to upper protocol */
2787 sco_recv_scodata(conn
, skb
);
2790 BT_ERR("%s SCO packet for unknown connection handle %d",
2791 hdev
->name
, handle
);
2797 static void hci_rx_work(struct work_struct
*work
)
2799 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2800 struct sk_buff
*skb
;
2802 BT_DBG("%s", hdev
->name
);
2804 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2805 /* Send copy to monitor */
2806 hci_send_to_monitor(hdev
, skb
);
2808 if (atomic_read(&hdev
->promisc
)) {
2809 /* Send copy to the sockets */
2810 hci_send_to_sock(hdev
, skb
);
2813 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2818 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2819 /* Don't process data packets in this states. */
2820 switch (bt_cb(skb
)->pkt_type
) {
2821 case HCI_ACLDATA_PKT
:
2822 case HCI_SCODATA_PKT
:
2829 switch (bt_cb(skb
)->pkt_type
) {
2831 BT_DBG("%s Event packet", hdev
->name
);
2832 hci_event_packet(hdev
, skb
);
2835 case HCI_ACLDATA_PKT
:
2836 BT_DBG("%s ACL data packet", hdev
->name
);
2837 hci_acldata_packet(hdev
, skb
);
2840 case HCI_SCODATA_PKT
:
2841 BT_DBG("%s SCO data packet", hdev
->name
);
2842 hci_scodata_packet(hdev
, skb
);
2852 static void hci_cmd_work(struct work_struct
*work
)
2854 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
2855 struct sk_buff
*skb
;
2857 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
2858 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
2860 /* Send queued commands */
2861 if (atomic_read(&hdev
->cmd_cnt
)) {
2862 skb
= skb_dequeue(&hdev
->cmd_q
);
2866 kfree_skb(hdev
->sent_cmd
);
2868 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
2869 if (hdev
->sent_cmd
) {
2870 atomic_dec(&hdev
->cmd_cnt
);
2871 hci_send_frame(skb
);
2872 if (test_bit(HCI_RESET
, &hdev
->flags
))
2873 del_timer(&hdev
->cmd_timer
);
2875 mod_timer(&hdev
->cmd_timer
,
2876 jiffies
+ HCI_CMD_TIMEOUT
);
2878 skb_queue_head(&hdev
->cmd_q
, skb
);
2879 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2884 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
2886 /* General inquiry access code (GIAC) */
2887 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2888 struct hci_cp_inquiry cp
;
2890 BT_DBG("%s", hdev
->name
);
2892 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2893 return -EINPROGRESS
;
2895 inquiry_cache_flush(hdev
);
2897 memset(&cp
, 0, sizeof(cp
));
2898 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2901 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2904 int hci_cancel_inquiry(struct hci_dev
*hdev
)
2906 BT_DBG("%s", hdev
->name
);
2908 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
2911 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
2914 u8
bdaddr_to_le(u8 bdaddr_type
)
2916 switch (bdaddr_type
) {
2917 case BDADDR_LE_PUBLIC
:
2918 return ADDR_LE_DEV_PUBLIC
;
2921 /* Fallback to LE Random address type */
2922 return ADDR_LE_DEV_RANDOM
;