/* net/bluetooth/hci_core.c */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward an HCI device event (up/down, register/unregister) to the
 * HCI socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
89 hci_dev_lock(hdev);
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
94 hci_dev_unlock(hdev);
96 if (!skb)
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
132 failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
175 default:
176 err = -ETIMEDOUT;
177 break;
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
184 if (err < 0)
185 return ERR_PTR(err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
201 unsigned long opt),
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
214 func(&req, opt);
216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
225 if (err == -ENODATA)
226 return 0;
228 return err;
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
239 return -EINTR;
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
243 err = -bt_to_errno(hdev->req_result);
244 break;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
250 default:
251 err = -ETIMEDOUT;
252 break;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
259 return err;
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
264 unsigned long opt),
265 unsigned long opt, __u32 timeout)
267 int ret;
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
272 /* Serialize all requests */
273 hci_req_lock(hdev);
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
277 return ret;
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
284 /* Reset device */
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
328 case HCI_BREDR:
329 bredr_init(req);
330 break;
332 case HCI_AMP:
333 amp_init(req);
334 break;
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
342 static void bredr_setup(struct hci_request *req)
344 __le16 param;
345 __u8 flt_type;
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
423 return 0x00;
426 static void hci_setup_inquiry_mode(struct hci_request *req)
428 u8 mode;
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 * command otherwise.
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 return;
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
459 if (lmp_inq_rssi_capable(hdev))
460 events[4] |= 0x02; /* Inquiry Result with RSSI */
462 if (lmp_sniffsubr_capable(hdev))
463 events[5] |= 0x20; /* Sniff Subrating */
465 if (lmp_pause_enc_capable(hdev))
466 events[5] |= 0x80; /* Encryption Key Refresh Complete */
468 if (lmp_ext_inq_capable(hdev))
469 events[5] |= 0x40; /* Extended Inquiry Result */
471 if (lmp_no_flush_capable(hdev))
472 events[7] |= 0x01; /* Enhanced Flush Complete */
474 if (lmp_lsto_capable(hdev))
475 events[6] |= 0x80; /* Link Supervision Timeout Changed */
477 if (lmp_ssp_capable(hdev)) {
478 events[6] |= 0x01; /* IO Capability Request */
479 events[6] |= 0x02; /* IO Capability Response */
480 events[6] |= 0x04; /* User Confirmation Request */
481 events[6] |= 0x08; /* User Passkey Request */
482 events[6] |= 0x10; /* Remote OOB Data Request */
483 events[6] |= 0x20; /* Simple Pairing Complete */
484 events[7] |= 0x04; /* User Passkey Notification */
485 events[7] |= 0x08; /* Keypress Notification */
486 events[7] |= 0x10; /* Remote Host Supported
487 * Features Notification
491 if (lmp_le_capable(hdev))
492 events[7] |= 0x20; /* LE Meta-Event */
494 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
496 if (lmp_le_capable(hdev)) {
497 memset(events, 0, sizeof(events));
498 events[0] = 0x1f;
499 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
500 sizeof(events), events);
504 static void hci_init2_req(struct hci_request *req, unsigned long opt)
506 struct hci_dev *hdev = req->hdev;
508 if (lmp_bredr_capable(hdev))
509 bredr_setup(req);
511 if (lmp_le_capable(hdev))
512 le_setup(req);
514 hci_setup_event_mask(req);
516 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
517 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
519 if (lmp_ssp_capable(hdev)) {
520 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
521 u8 mode = 0x01;
522 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
523 sizeof(mode), &mode);
524 } else {
525 struct hci_cp_write_eir cp;
527 memset(hdev->eir, 0, sizeof(hdev->eir));
528 memset(&cp, 0, sizeof(cp));
530 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
534 if (lmp_inq_rssi_capable(hdev))
535 hci_setup_inquiry_mode(req);
537 if (lmp_inq_tx_pwr_capable(hdev))
538 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
540 if (lmp_ext_feat_capable(hdev)) {
541 struct hci_cp_read_local_ext_features cp;
543 cp.page = 0x01;
544 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
545 sizeof(cp), &cp);
548 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
549 u8 enable = 1;
550 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
551 &enable);
555 static void hci_setup_link_policy(struct hci_request *req)
557 struct hci_dev *hdev = req->hdev;
558 struct hci_cp_write_def_link_policy cp;
559 u16 link_policy = 0;
561 if (lmp_rswitch_capable(hdev))
562 link_policy |= HCI_LP_RSWITCH;
563 if (lmp_hold_capable(hdev))
564 link_policy |= HCI_LP_HOLD;
565 if (lmp_sniff_capable(hdev))
566 link_policy |= HCI_LP_SNIFF;
567 if (lmp_park_capable(hdev))
568 link_policy |= HCI_LP_PARK;
570 cp.policy = cpu_to_le16(link_policy);
571 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
574 static void hci_set_le_support(struct hci_request *req)
576 struct hci_dev *hdev = req->hdev;
577 struct hci_cp_write_le_host_supported cp;
579 /* LE-only devices do not support explicit enablement */
580 if (!lmp_bredr_capable(hdev))
581 return;
583 memset(&cp, 0, sizeof(cp));
585 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
586 cp.le = 0x01;
587 cp.simul = lmp_le_br_capable(hdev);
590 if (cp.le != lmp_host_le_capable(hdev))
591 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
592 &cp);
595 static void hci_init3_req(struct hci_request *req, unsigned long opt)
597 struct hci_dev *hdev = req->hdev;
598 u8 p;
600 /* Only send HCI_Delete_Stored_Link_Key if it is supported */
601 if (hdev->commands[6] & 0x80) {
602 struct hci_cp_delete_stored_link_key cp;
604 bacpy(&cp.bdaddr, BDADDR_ANY);
605 cp.delete_all = 0x01;
606 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
607 sizeof(cp), &cp);
610 if (hdev->commands[5] & 0x10)
611 hci_setup_link_policy(req);
613 if (lmp_le_capable(hdev)) {
614 hci_set_le_support(req);
615 hci_update_ad(req);
618 /* Read features beyond page 1 if available */
619 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
620 struct hci_cp_read_local_ext_features cp;
622 cp.page = p;
623 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
624 sizeof(cp), &cp);
628 static int __hci_init(struct hci_dev *hdev)
630 int err;
632 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
633 if (err < 0)
634 return err;
636 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
637 * BR/EDR/LE type controllers. AMP controllers only need the
638 * first stage init.
640 if (hdev->dev_type != HCI_BREDR)
641 return 0;
643 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
644 if (err < 0)
645 return err;
647 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
650 static void hci_scan_req(struct hci_request *req, unsigned long opt)
652 __u8 scan = opt;
654 BT_DBG("%s %x", req->hdev->name, scan);
656 /* Inquiry and Page scans */
657 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
660 static void hci_auth_req(struct hci_request *req, unsigned long opt)
662 __u8 auth = opt;
664 BT_DBG("%s %x", req->hdev->name, auth);
666 /* Authentication */
667 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
670 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
672 __u8 encrypt = opt;
674 BT_DBG("%s %x", req->hdev->name, encrypt);
676 /* Encryption */
677 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
680 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
682 __le16 policy = cpu_to_le16(opt);
684 BT_DBG("%s %x", req->hdev->name, policy);
686 /* Default link policy */
687 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
690 /* Get HCI device by index.
691 * Device is held on return. */
692 struct hci_dev *hci_dev_get(int index)
694 struct hci_dev *hdev = NULL, *d;
696 BT_DBG("%d", index);
698 if (index < 0)
699 return NULL;
701 read_lock(&hci_dev_list_lock);
702 list_for_each_entry(d, &hci_dev_list, list) {
703 if (d->id == index) {
704 hdev = hci_dev_hold(d);
705 break;
708 read_unlock(&hci_dev_list_lock);
709 return hdev;
712 /* ---- Inquiry support ---- */
714 bool hci_discovery_active(struct hci_dev *hdev)
716 struct discovery_state *discov = &hdev->discovery;
718 switch (discov->state) {
719 case DISCOVERY_FINDING:
720 case DISCOVERY_RESOLVING:
721 return true;
723 default:
724 return false;
728 void hci_discovery_set_state(struct hci_dev *hdev, int state)
730 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
732 if (hdev->discovery.state == state)
733 return;
735 switch (state) {
736 case DISCOVERY_STOPPED:
737 if (hdev->discovery.state != DISCOVERY_STARTING)
738 mgmt_discovering(hdev, 0);
739 break;
740 case DISCOVERY_STARTING:
741 break;
742 case DISCOVERY_FINDING:
743 mgmt_discovering(hdev, 1);
744 break;
745 case DISCOVERY_RESOLVING:
746 break;
747 case DISCOVERY_STOPPING:
748 break;
751 hdev->discovery.state = state;
754 static void inquiry_cache_flush(struct hci_dev *hdev)
756 struct discovery_state *cache = &hdev->discovery;
757 struct inquiry_entry *p, *n;
759 list_for_each_entry_safe(p, n, &cache->all, all) {
760 list_del(&p->all);
761 kfree(p);
764 INIT_LIST_HEAD(&cache->unknown);
765 INIT_LIST_HEAD(&cache->resolve);
768 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
769 bdaddr_t *bdaddr)
771 struct discovery_state *cache = &hdev->discovery;
772 struct inquiry_entry *e;
774 BT_DBG("cache %p, %pMR", cache, bdaddr);
776 list_for_each_entry(e, &cache->all, all) {
777 if (!bacmp(&e->data.bdaddr, bdaddr))
778 return e;
781 return NULL;
784 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
785 bdaddr_t *bdaddr)
787 struct discovery_state *cache = &hdev->discovery;
788 struct inquiry_entry *e;
790 BT_DBG("cache %p, %pMR", cache, bdaddr);
792 list_for_each_entry(e, &cache->unknown, list) {
793 if (!bacmp(&e->data.bdaddr, bdaddr))
794 return e;
797 return NULL;
800 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
801 bdaddr_t *bdaddr,
802 int state)
804 struct discovery_state *cache = &hdev->discovery;
805 struct inquiry_entry *e;
807 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
809 list_for_each_entry(e, &cache->resolve, list) {
810 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
811 return e;
812 if (!bacmp(&e->data.bdaddr, bdaddr))
813 return e;
816 return NULL;
819 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
820 struct inquiry_entry *ie)
822 struct discovery_state *cache = &hdev->discovery;
823 struct list_head *pos = &cache->resolve;
824 struct inquiry_entry *p;
826 list_del(&ie->list);
828 list_for_each_entry(p, &cache->resolve, list) {
829 if (p->name_state != NAME_PENDING &&
830 abs(p->data.rssi) >= abs(ie->data.rssi))
831 break;
832 pos = &p->list;
835 list_add(&ie->list, pos);
838 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
839 bool name_known, bool *ssp)
841 struct discovery_state *cache = &hdev->discovery;
842 struct inquiry_entry *ie;
844 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
846 hci_remove_remote_oob_data(hdev, &data->bdaddr);
848 if (ssp)
849 *ssp = data->ssp_mode;
851 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
852 if (ie) {
853 if (ie->data.ssp_mode && ssp)
854 *ssp = true;
856 if (ie->name_state == NAME_NEEDED &&
857 data->rssi != ie->data.rssi) {
858 ie->data.rssi = data->rssi;
859 hci_inquiry_cache_update_resolve(hdev, ie);
862 goto update;
865 /* Entry not in the cache. Add new one. */
866 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
867 if (!ie)
868 return false;
870 list_add(&ie->all, &cache->all);
872 if (name_known) {
873 ie->name_state = NAME_KNOWN;
874 } else {
875 ie->name_state = NAME_NOT_KNOWN;
876 list_add(&ie->list, &cache->unknown);
879 update:
880 if (name_known && ie->name_state != NAME_KNOWN &&
881 ie->name_state != NAME_PENDING) {
882 ie->name_state = NAME_KNOWN;
883 list_del(&ie->list);
886 memcpy(&ie->data, data, sizeof(*data));
887 ie->timestamp = jiffies;
888 cache->timestamp = jiffies;
890 if (ie->name_state == NAME_NOT_KNOWN)
891 return false;
893 return true;
896 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
898 struct discovery_state *cache = &hdev->discovery;
899 struct inquiry_info *info = (struct inquiry_info *) buf;
900 struct inquiry_entry *e;
901 int copied = 0;
903 list_for_each_entry(e, &cache->all, all) {
904 struct inquiry_data *data = &e->data;
906 if (copied >= num)
907 break;
909 bacpy(&info->bdaddr, &data->bdaddr);
910 info->pscan_rep_mode = data->pscan_rep_mode;
911 info->pscan_period_mode = data->pscan_period_mode;
912 info->pscan_mode = data->pscan_mode;
913 memcpy(info->dev_class, data->dev_class, 3);
914 info->clock_offset = data->clock_offset;
916 info++;
917 copied++;
920 BT_DBG("cache %p, copied %d", cache, copied);
921 return copied;
924 static void hci_inq_req(struct hci_request *req, unsigned long opt)
926 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
927 struct hci_dev *hdev = req->hdev;
928 struct hci_cp_inquiry cp;
930 BT_DBG("%s", hdev->name);
932 if (test_bit(HCI_INQUIRY, &hdev->flags))
933 return;
935 /* Start Inquiry */
936 memcpy(&cp.lap, &ir->lap, 3);
937 cp.length = ir->length;
938 cp.num_rsp = ir->num_rsp;
939 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
942 static int wait_inquiry(void *word)
944 schedule();
945 return signal_pending(current);
948 int hci_inquiry(void __user *arg)
950 __u8 __user *ptr = arg;
951 struct hci_inquiry_req ir;
952 struct hci_dev *hdev;
953 int err = 0, do_inquiry = 0, max_rsp;
954 long timeo;
955 __u8 *buf;
957 if (copy_from_user(&ir, ptr, sizeof(ir)))
958 return -EFAULT;
960 hdev = hci_dev_get(ir.dev_id);
961 if (!hdev)
962 return -ENODEV;
964 hci_dev_lock(hdev);
965 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
966 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
967 inquiry_cache_flush(hdev);
968 do_inquiry = 1;
970 hci_dev_unlock(hdev);
972 timeo = ir.length * msecs_to_jiffies(2000);
974 if (do_inquiry) {
975 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
976 timeo);
977 if (err < 0)
978 goto done;
980 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
981 * cleared). If it is interrupted by a signal, return -EINTR.
983 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
984 TASK_INTERRUPTIBLE))
985 return -EINTR;
988 /* for unlimited number of responses we will use buffer with
989 * 255 entries
991 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
993 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
994 * copy it to the user space.
996 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
997 if (!buf) {
998 err = -ENOMEM;
999 goto done;
1002 hci_dev_lock(hdev);
1003 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1004 hci_dev_unlock(hdev);
1006 BT_DBG("num_rsp %d", ir.num_rsp);
1008 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1009 ptr += sizeof(ir);
1010 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1011 ir.num_rsp))
1012 err = -EFAULT;
1013 } else
1014 err = -EFAULT;
1016 kfree(buf);
1018 done:
1019 hci_dev_put(hdev);
1020 return err;
1023 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1025 u8 ad_len = 0, flags = 0;
1026 size_t name_len;
1028 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1029 flags |= LE_AD_GENERAL;
1031 if (!lmp_bredr_capable(hdev))
1032 flags |= LE_AD_NO_BREDR;
1034 if (lmp_le_br_capable(hdev))
1035 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1037 if (lmp_host_le_br_capable(hdev))
1038 flags |= LE_AD_SIM_LE_BREDR_HOST;
1040 if (flags) {
1041 BT_DBG("adv flags 0x%02x", flags);
1043 ptr[0] = 2;
1044 ptr[1] = EIR_FLAGS;
1045 ptr[2] = flags;
1047 ad_len += 3;
1048 ptr += 3;
1051 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1052 ptr[0] = 2;
1053 ptr[1] = EIR_TX_POWER;
1054 ptr[2] = (u8) hdev->adv_tx_power;
1056 ad_len += 3;
1057 ptr += 3;
1060 name_len = strlen(hdev->dev_name);
1061 if (name_len > 0) {
1062 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1064 if (name_len > max_len) {
1065 name_len = max_len;
1066 ptr[1] = EIR_NAME_SHORT;
1067 } else
1068 ptr[1] = EIR_NAME_COMPLETE;
1070 ptr[0] = name_len + 1;
1072 memcpy(ptr + 2, hdev->dev_name, name_len);
1074 ad_len += (name_len + 2);
1075 ptr += (name_len + 2);
1078 return ad_len;
1081 void hci_update_ad(struct hci_request *req)
1083 struct hci_dev *hdev = req->hdev;
1084 struct hci_cp_le_set_adv_data cp;
1085 u8 len;
1087 if (!lmp_le_capable(hdev))
1088 return;
1090 memset(&cp, 0, sizeof(cp));
1092 len = create_ad(hdev, cp.data);
1094 if (hdev->adv_data_len == len &&
1095 memcmp(cp.data, hdev->adv_data, len) == 0)
1096 return;
1098 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1099 hdev->adv_data_len = len;
1101 cp.length = len;
1103 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1106 /* ---- HCI ioctl helpers ---- */
1108 int hci_dev_open(__u16 dev)
1110 struct hci_dev *hdev;
1111 int ret = 0;
1113 hdev = hci_dev_get(dev);
1114 if (!hdev)
1115 return -ENODEV;
1117 BT_DBG("%s %p", hdev->name, hdev);
1119 hci_req_lock(hdev);
1121 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1122 ret = -ENODEV;
1123 goto done;
1126 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1127 ret = -ERFKILL;
1128 goto done;
1131 if (test_bit(HCI_UP, &hdev->flags)) {
1132 ret = -EALREADY;
1133 goto done;
1136 if (hdev->open(hdev)) {
1137 ret = -EIO;
1138 goto done;
1141 atomic_set(&hdev->cmd_cnt, 1);
1142 set_bit(HCI_INIT, &hdev->flags);
1144 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1145 ret = hdev->setup(hdev);
1147 if (!ret) {
1148 /* Treat all non BR/EDR controllers as raw devices if
1149 * enable_hs is not set.
1151 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1152 set_bit(HCI_RAW, &hdev->flags);
1154 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1155 set_bit(HCI_RAW, &hdev->flags);
1157 if (!test_bit(HCI_RAW, &hdev->flags))
1158 ret = __hci_init(hdev);
1161 clear_bit(HCI_INIT, &hdev->flags);
1163 if (!ret) {
1164 hci_dev_hold(hdev);
1165 set_bit(HCI_UP, &hdev->flags);
1166 hci_notify(hdev, HCI_DEV_UP);
1167 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1168 mgmt_valid_hdev(hdev)) {
1169 hci_dev_lock(hdev);
1170 mgmt_powered(hdev, 1);
1171 hci_dev_unlock(hdev);
1173 } else {
1174 /* Init failed, cleanup */
1175 flush_work(&hdev->tx_work);
1176 flush_work(&hdev->cmd_work);
1177 flush_work(&hdev->rx_work);
1179 skb_queue_purge(&hdev->cmd_q);
1180 skb_queue_purge(&hdev->rx_q);
1182 if (hdev->flush)
1183 hdev->flush(hdev);
1185 if (hdev->sent_cmd) {
1186 kfree_skb(hdev->sent_cmd);
1187 hdev->sent_cmd = NULL;
1190 hdev->close(hdev);
1191 hdev->flags = 0;
1194 done:
1195 hci_req_unlock(hdev);
1196 hci_dev_put(hdev);
1197 return ret;
1200 static int hci_dev_do_close(struct hci_dev *hdev)
1202 BT_DBG("%s %p", hdev->name, hdev);
1204 cancel_work_sync(&hdev->le_scan);
1206 cancel_delayed_work(&hdev->power_off);
1208 hci_req_cancel(hdev, ENODEV);
1209 hci_req_lock(hdev);
1211 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1212 del_timer_sync(&hdev->cmd_timer);
1213 hci_req_unlock(hdev);
1214 return 0;
1217 /* Flush RX and TX works */
1218 flush_work(&hdev->tx_work);
1219 flush_work(&hdev->rx_work);
1221 if (hdev->discov_timeout > 0) {
1222 cancel_delayed_work(&hdev->discov_off);
1223 hdev->discov_timeout = 0;
1224 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1227 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1228 cancel_delayed_work(&hdev->service_cache);
1230 cancel_delayed_work_sync(&hdev->le_scan_disable);
1232 hci_dev_lock(hdev);
1233 inquiry_cache_flush(hdev);
1234 hci_conn_hash_flush(hdev);
1235 hci_dev_unlock(hdev);
1237 hci_notify(hdev, HCI_DEV_DOWN);
1239 if (hdev->flush)
1240 hdev->flush(hdev);
1242 /* Reset device */
1243 skb_queue_purge(&hdev->cmd_q);
1244 atomic_set(&hdev->cmd_cnt, 1);
1245 if (!test_bit(HCI_RAW, &hdev->flags) &&
1246 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1247 set_bit(HCI_INIT, &hdev->flags);
1248 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1249 clear_bit(HCI_INIT, &hdev->flags);
1252 /* flush cmd work */
1253 flush_work(&hdev->cmd_work);
1255 /* Drop queues */
1256 skb_queue_purge(&hdev->rx_q);
1257 skb_queue_purge(&hdev->cmd_q);
1258 skb_queue_purge(&hdev->raw_q);
1260 /* Drop last sent command */
1261 if (hdev->sent_cmd) {
1262 del_timer_sync(&hdev->cmd_timer);
1263 kfree_skb(hdev->sent_cmd);
1264 hdev->sent_cmd = NULL;
1267 kfree_skb(hdev->recv_evt);
1268 hdev->recv_evt = NULL;
1270 /* After this point our queues are empty
1271 * and no tasks are scheduled. */
1272 hdev->close(hdev);
1274 /* Clear flags */
1275 hdev->flags = 0;
1276 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1278 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1279 mgmt_valid_hdev(hdev)) {
1280 hci_dev_lock(hdev);
1281 mgmt_powered(hdev, 0);
1282 hci_dev_unlock(hdev);
1285 /* Controller radio is available but is currently powered down */
1286 hdev->amp_status = 0;
1288 memset(hdev->eir, 0, sizeof(hdev->eir));
1289 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1291 hci_req_unlock(hdev);
1293 hci_dev_put(hdev);
1294 return 0;
1297 int hci_dev_close(__u16 dev)
1299 struct hci_dev *hdev;
1300 int err;
1302 hdev = hci_dev_get(dev);
1303 if (!hdev)
1304 return -ENODEV;
1306 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1307 cancel_delayed_work(&hdev->power_off);
1309 err = hci_dev_do_close(hdev);
1311 hci_dev_put(hdev);
1312 return err;
1315 int hci_dev_reset(__u16 dev)
1317 struct hci_dev *hdev;
1318 int ret = 0;
1320 hdev = hci_dev_get(dev);
1321 if (!hdev)
1322 return -ENODEV;
1324 hci_req_lock(hdev);
1326 if (!test_bit(HCI_UP, &hdev->flags))
1327 goto done;
1329 /* Drop queues */
1330 skb_queue_purge(&hdev->rx_q);
1331 skb_queue_purge(&hdev->cmd_q);
1333 hci_dev_lock(hdev);
1334 inquiry_cache_flush(hdev);
1335 hci_conn_hash_flush(hdev);
1336 hci_dev_unlock(hdev);
1338 if (hdev->flush)
1339 hdev->flush(hdev);
1341 atomic_set(&hdev->cmd_cnt, 1);
1342 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1344 if (!test_bit(HCI_RAW, &hdev->flags))
1345 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1347 done:
1348 hci_req_unlock(hdev);
1349 hci_dev_put(hdev);
1350 return ret;
1353 int hci_dev_reset_stat(__u16 dev)
1355 struct hci_dev *hdev;
1356 int ret = 0;
1358 hdev = hci_dev_get(dev);
1359 if (!hdev)
1360 return -ENODEV;
1362 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1364 hci_dev_put(hdev);
1366 return ret;
/* Handle the legacy HCISET* ioctls that tweak a single controller
 * setting.
 * @cmd: HCISETAUTH, HCISETENCRYPT, HCISETSCAN, HCISETLINKPOL,
 *       HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU or HCISETSCOMTU
 * @arg: userspace pointer to a struct hci_dev_req
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV,
 * -EOPNOTSUPP, -EINVAL, or the synchronous request's error).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are user-controllable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the upper 16 bits and the
		 * packet count in the lower 16 bits (host endianness) */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1444 int hci_get_dev_list(void __user *arg)
1446 struct hci_dev *hdev;
1447 struct hci_dev_list_req *dl;
1448 struct hci_dev_req *dr;
1449 int n = 0, size, err;
1450 __u16 dev_num;
1452 if (get_user(dev_num, (__u16 __user *) arg))
1453 return -EFAULT;
1455 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1456 return -EINVAL;
1458 size = sizeof(*dl) + dev_num * sizeof(*dr);
1460 dl = kzalloc(size, GFP_KERNEL);
1461 if (!dl)
1462 return -ENOMEM;
1464 dr = dl->dev_req;
1466 read_lock(&hci_dev_list_lock);
1467 list_for_each_entry(hdev, &hci_dev_list, list) {
1468 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1469 cancel_delayed_work(&hdev->power_off);
1471 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1472 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1474 (dr + n)->dev_id = hdev->id;
1475 (dr + n)->dev_opt = hdev->flags;
1477 if (++n >= dev_num)
1478 break;
1480 read_unlock(&hci_dev_list_lock);
1482 dl->dev_num = n;
1483 size = sizeof(*dl) + n * sizeof(*dr);
1485 err = copy_to_user(arg, dl, size);
1486 kfree(dl);
1488 return err ? -EFAULT : 0;
1491 int hci_get_dev_info(void __user *arg)
1493 struct hci_dev *hdev;
1494 struct hci_dev_info di;
1495 int err = 0;
1497 if (copy_from_user(&di, arg, sizeof(di)))
1498 return -EFAULT;
1500 hdev = hci_dev_get(di.dev_id);
1501 if (!hdev)
1502 return -ENODEV;
1504 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1505 cancel_delayed_work_sync(&hdev->power_off);
1507 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1508 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1510 strcpy(di.name, hdev->name);
1511 di.bdaddr = hdev->bdaddr;
1512 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1513 di.flags = hdev->flags;
1514 di.pkt_type = hdev->pkt_type;
1515 if (lmp_bredr_capable(hdev)) {
1516 di.acl_mtu = hdev->acl_mtu;
1517 di.acl_pkts = hdev->acl_pkts;
1518 di.sco_mtu = hdev->sco_mtu;
1519 di.sco_pkts = hdev->sco_pkts;
1520 } else {
1521 di.acl_mtu = hdev->le_mtu;
1522 di.acl_pkts = hdev->le_pkts;
1523 di.sco_mtu = 0;
1524 di.sco_pkts = 0;
1526 di.link_policy = hdev->link_policy;
1527 di.link_mode = hdev->link_mode;
1529 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1530 memcpy(&di.features, &hdev->features, sizeof(di.features));
1532 if (copy_to_user(arg, &di, sizeof(di)))
1533 err = -EFAULT;
1535 hci_dev_put(hdev);
1537 return err;
1540 /* ---- Interface to HCI drivers ---- */
1542 static int hci_rfkill_set_block(void *data, bool blocked)
1544 struct hci_dev *hdev = data;
1546 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1548 if (!blocked)
1549 return 0;
1551 hci_dev_do_close(hdev);
1553 return 0;
/* rfkill integration: only the block operation is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1560 static void hci_power_on(struct work_struct *work)
1562 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1563 int err;
1565 BT_DBG("%s", hdev->name);
1567 err = hci_dev_open(hdev->id);
1568 if (err < 0) {
1569 mgmt_set_powered_failed(hdev, err);
1570 return;
1573 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1574 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1575 HCI_AUTO_OFF_TIMEOUT);
1577 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1578 mgmt_index_added(hdev);
1581 static void hci_power_off(struct work_struct *work)
1583 struct hci_dev *hdev = container_of(work, struct hci_dev,
1584 power_off.work);
1586 BT_DBG("%s", hdev->name);
1588 hci_dev_do_close(hdev);
1591 static void hci_discov_off(struct work_struct *work)
1593 struct hci_dev *hdev;
1594 u8 scan = SCAN_PAGE;
1596 hdev = container_of(work, struct hci_dev, discov_off.work);
1598 BT_DBG("%s", hdev->name);
1600 hci_dev_lock(hdev);
1602 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1604 hdev->discov_timeout = 0;
1606 hci_dev_unlock(hdev);
1609 int hci_uuids_clear(struct hci_dev *hdev)
1611 struct bt_uuid *uuid, *tmp;
1613 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1614 list_del(&uuid->list);
1615 kfree(uuid);
1618 return 0;
1621 int hci_link_keys_clear(struct hci_dev *hdev)
1623 struct list_head *p, *n;
1625 list_for_each_safe(p, n, &hdev->link_keys) {
1626 struct link_key *key;
1628 key = list_entry(p, struct link_key, list);
1630 list_del(p);
1631 kfree(key);
1634 return 0;
1637 int hci_smp_ltks_clear(struct hci_dev *hdev)
1639 struct smp_ltk *k, *tmp;
1641 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1642 list_del(&k->list);
1643 kfree(k);
1646 return 0;
1649 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1651 struct link_key *k;
1653 list_for_each_entry(k, &hdev->link_keys, list)
1654 if (bacmp(bdaddr, &k->bdaddr) == 0)
1655 return k;
1657 return NULL;
/* Decide whether a freshly created BR/EDR link key should be stored
 * persistently or only kept for the lifetime of the connection.
 * @conn may be NULL (security mode 3: key created without a
 * connection).  The checks below are ordered by precedence; do not
 * reorder them.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1696 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1698 struct smp_ltk *k;
1700 list_for_each_entry(k, &hdev->long_term_keys, list) {
1701 if (k->ediv != ediv ||
1702 memcmp(rand, k->rand, sizeof(k->rand)))
1703 continue;
1705 return k;
1708 return NULL;
1711 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1712 u8 addr_type)
1714 struct smp_ltk *k;
1716 list_for_each_entry(k, &hdev->long_term_keys, list)
1717 if (addr_type == k->bdaddr_type &&
1718 bacmp(bdaddr, &k->bdaddr) == 0)
1719 return k;
1721 return NULL;
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 * @conn may be NULL.  @new_key signals the key came from a fresh
 * pairing, in which case mgmt is notified and the key's persistence
 * is decided via hci_persistent_key().
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;	/* reuse the existing entry */
	} else {
		/* 0xff = "no previous key type known" sentinel */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" key inherits the previous key's type
	 * so later persistence decisions reflect the original pairing */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
/* Store (or update in place) an SMP Short Term Key or Long Term Key
 * for @bdaddr/@addr_type.  Keys of any other type are silently
 * ignored.  If @new_key is set and the key is an LTK, mgmt is
 * notified.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;	/* reuse the existing entry */
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1814 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1816 struct link_key *key;
1818 key = hci_find_link_key(hdev, bdaddr);
1819 if (!key)
1820 return -ENOENT;
1822 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1824 list_del(&key->list);
1825 kfree(key);
1827 return 0;
1830 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1832 struct smp_ltk *k, *tmp;
1834 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1835 if (bacmp(bdaddr, &k->bdaddr))
1836 continue;
1838 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1840 list_del(&k->list);
1841 kfree(k);
1844 return 0;
1847 /* HCI command timer function */
1848 static void hci_cmd_timeout(unsigned long arg)
1850 struct hci_dev *hdev = (void *) arg;
1852 if (hdev->sent_cmd) {
1853 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1854 u16 opcode = __le16_to_cpu(sent->opcode);
1856 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1857 } else {
1858 BT_ERR("%s command tx timeout", hdev->name);
1861 atomic_set(&hdev->cmd_cnt, 1);
1862 queue_work(hdev->workqueue, &hdev->cmd_work);
1865 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1866 bdaddr_t *bdaddr)
1868 struct oob_data *data;
1870 list_for_each_entry(data, &hdev->remote_oob_data, list)
1871 if (bacmp(bdaddr, &data->bdaddr) == 0)
1872 return data;
1874 return NULL;
1877 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1879 struct oob_data *data;
1881 data = hci_find_remote_oob_data(hdev, bdaddr);
1882 if (!data)
1883 return -ENOENT;
1885 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1887 list_del(&data->list);
1888 kfree(data);
1890 return 0;
1893 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1895 struct oob_data *data, *n;
1897 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1898 list_del(&data->list);
1899 kfree(data);
1902 return 0;
1905 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1906 u8 *randomizer)
1908 struct oob_data *data;
1910 data = hci_find_remote_oob_data(hdev, bdaddr);
1912 if (!data) {
1913 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1914 if (!data)
1915 return -ENOMEM;
1917 bacpy(&data->bdaddr, bdaddr);
1918 list_add(&data->list, &hdev->remote_oob_data);
1921 memcpy(data->hash, hash, sizeof(data->hash));
1922 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1924 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1926 return 0;
1929 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1931 struct bdaddr_list *b;
1933 list_for_each_entry(b, &hdev->blacklist, list)
1934 if (bacmp(bdaddr, &b->bdaddr) == 0)
1935 return b;
1937 return NULL;
1940 int hci_blacklist_clear(struct hci_dev *hdev)
1942 struct list_head *p, *n;
1944 list_for_each_safe(p, n, &hdev->blacklist) {
1945 struct bdaddr_list *b;
1947 b = list_entry(p, struct bdaddr_list, list);
1949 list_del(p);
1950 kfree(b);
1953 return 0;
1956 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1958 struct bdaddr_list *entry;
1960 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1961 return -EBADF;
1963 if (hci_blacklist_lookup(hdev, bdaddr))
1964 return -EEXIST;
1966 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1967 if (!entry)
1968 return -ENOMEM;
1970 bacpy(&entry->bdaddr, bdaddr);
1972 list_add(&entry->list, &hdev->blacklist);
1974 return mgmt_device_blocked(hdev, bdaddr, type);
1977 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1979 struct bdaddr_list *entry;
1981 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1982 return hci_blacklist_clear(hdev);
1984 entry = hci_blacklist_lookup(hdev, bdaddr);
1985 if (!entry)
1986 return -ENOENT;
1988 list_del(&entry->list);
1989 kfree(entry);
1991 return mgmt_device_unblocked(hdev, bdaddr, type);
1994 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1996 struct le_scan_params *param = (struct le_scan_params *) opt;
1997 struct hci_cp_le_set_scan_param cp;
1999 memset(&cp, 0, sizeof(cp));
2000 cp.type = param->type;
2001 cp.interval = cpu_to_le16(param->interval);
2002 cp.window = cpu_to_le16(param->window);
2004 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2007 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2009 struct hci_cp_le_set_scan_enable cp;
2011 memset(&cp, 0, sizeof(cp));
2012 cp.enable = LE_SCAN_ENABLE;
2013 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2015 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Start an LE scan with the given parameters and arm the delayed work
 * that will disable it after @timeout jiffies.
 * Returns -EINPROGRESS if a scan is already active, a negative error
 * from the synchronous requests, or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning is enabled */
	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   timeout);

	return 0;
}
2052 int hci_cancel_le_scan(struct hci_dev *hdev)
2054 BT_DBG("%s", hdev->name);
2056 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2057 return -EALREADY;
2059 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2060 struct hci_cp_le_set_scan_enable cp;
2062 /* Send HCI command to disable LE Scan */
2063 memset(&cp, 0, sizeof(cp));
2064 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2067 return 0;
2070 static void le_scan_disable_work(struct work_struct *work)
2072 struct hci_dev *hdev = container_of(work, struct hci_dev,
2073 le_scan_disable.work);
2074 struct hci_cp_le_set_scan_enable cp;
2076 BT_DBG("%s", hdev->name);
2078 memset(&cp, 0, sizeof(cp));
2080 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2083 static void le_scan_work(struct work_struct *work)
2085 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2086 struct le_scan_params *param = &hdev->le_scan_params;
2088 BT_DBG("%s", hdev->name);
2090 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2091 param->timeout);
2094 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2095 int timeout)
2097 struct le_scan_params *param = &hdev->le_scan_params;
2099 BT_DBG("%s", hdev->name);
2101 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2102 return -ENOTSUPP;
2104 if (work_busy(&hdev->le_scan))
2105 return -EINPROGRESS;
2107 param->type = type;
2108 param->interval = interval;
2109 param->window = window;
2110 param->timeout = timeout;
2112 queue_work(system_long_wq, &hdev->le_scan);
2114 return 0;
/* Alloc HCI device */
/* Allocate and initialise a new hci_dev with sane defaults, empty
 * lists/queues and all work items wired up.  The caller configures the
 * driver callbacks and registers it with hci_register_dev(); release
 * with hci_free_dev().  Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative defaults; drivers and the init sequence refine
	 * them once the controller's real capabilities are known */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Detects controllers that stop answering commands */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2172 /* Free HCI device */
2173 void hci_free_dev(struct hci_dev *hdev)
2175 /* will free via device release */
2176 put_device(&hdev->dev);
2178 EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
/* Register a controller previously allocated with hci_alloc_dev():
 * assign an index, create its workqueues, sysfs entry and rfkill
 * switch, and queue the initial power-on.
 * Returns the new index (>= 0) on success or a negative errno; on
 * failure all partially acquired resources are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failures are tolerated
	 * and simply leave hdev->rfkill NULL */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
/* Tear down a registered controller: close it, flush pending work,
 * notify mgmt/listeners, remove sysfs and rfkill, free all stored
 * keys/lists and release the index.  The teardown order matters; do
 * not reorder the steps.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevents concurrent opens from this point on */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2327 /* Suspend HCI device */
2328 int hci_suspend_dev(struct hci_dev *hdev)
2330 hci_notify(hdev, HCI_DEV_SUSPEND);
2331 return 0;
2333 EXPORT_SYMBOL(hci_suspend_dev);
2335 /* Resume HCI device */
2336 int hci_resume_dev(struct hci_dev *hdev)
2338 hci_notify(hdev, HCI_DEV_RESUME);
2339 return 0;
2341 EXPORT_SYMBOL(hci_resume_dev);
2343 /* Receive frame from HCI drivers */
2344 int hci_recv_frame(struct sk_buff *skb)
2346 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2347 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2348 && !test_bit(HCI_INIT, &hdev->flags))) {
2349 kfree_skb(skb);
2350 return -ENXIO;
2353 /* Incoming skb */
2354 bt_cb(skb)->incoming = 1;
2356 /* Time stamp */
2357 __net_timestamp(skb);
2359 skb_queue_tail(&hdev->rx_q, skb);
2360 queue_work(hdev->workqueue, &hdev->rx_work);
2362 return 0;
2364 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a (possibly partial) HCI packet of @type from up to
 * @count bytes at @data, using the per-device reassembly slot @index
 * to carry state across calls.
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for
 * an invalid type/index, or -ENOMEM on allocation failure or when the
 * advertised payload exceeds the buffer.  A completed packet is passed
 * on via hci_recv_frame() and the slot is reset.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer big enough
		 * for the largest possible packet of this type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* First expect just the header; the header then tells
		 * us how much payload follows */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the type-specific header is complete, read the
		 * payload length out of it */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2475 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2477 int rem = 0;
2479 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2480 return -EILSEQ;
2482 while (count) {
2483 rem = hci_reassembly(hdev, type, data, count, type - 1);
2484 if (rem < 0)
2485 return rem;
2487 data += (count - rem);
2488 count = rem;
2491 return rem;
2493 EXPORT_SYMBOL(hci_recv_fragment);
/* Slot used for byte-stream transports (e.g. UART), where the packet
 * type indicator is in-band as the first byte of each packet */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into the reassembler.  When no packet is in
 * progress the first byte names the packet type; otherwise the type of
 * the partially assembled packet is reused.
 * Returns the leftover count (normally 0) or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2530 /* ---- Interface to upper protocols ---- */
2532 int hci_register_cb(struct hci_cb *cb)
2534 BT_DBG("%p name %s", cb, cb->name);
2536 write_lock(&hci_cb_list_lock);
2537 list_add(&cb->list, &hci_cb_list);
2538 write_unlock(&hci_cb_list_lock);
2540 return 0;
2542 EXPORT_SYMBOL(hci_register_cb);
2544 int hci_unregister_cb(struct hci_cb *cb)
2546 BT_DBG("%p name %s", cb, cb->name);
2548 write_lock(&hci_cb_list_lock);
2549 list_del(&cb->list);
2550 write_unlock(&hci_cb_list_lock);
2552 return 0;
2554 EXPORT_SYMBOL(hci_unregister_cb);
2556 static int hci_send_frame(struct sk_buff *skb)
2558 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2560 if (!hdev) {
2561 kfree_skb(skb);
2562 return -ENODEV;
2565 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2567 /* Time stamp */
2568 __net_timestamp(skb);
2570 /* Send copy to monitor */
2571 hci_send_to_monitor(hdev, skb);
2573 if (atomic_read(&hdev->promisc)) {
2574 /* Send copy to the sockets */
2575 hci_send_to_sock(hdev, skb);
2578 /* Get rid of skb owner, prior to sending to the driver. */
2579 skb_orphan(skb);
2581 return hdev->send(skb);
2584 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2586 skb_queue_head_init(&req->cmd_q);
2587 req->hdev = hdev;
2588 req->err = 0;
/* Submit a built request: splice its queued commands onto the device
 * command queue and kick the command worker.  @complete is attached to
 * the last command so it runs when the whole request finishes.
 * Returns 0, the builder's recorded error, or -ENODATA for an empty
 * request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically with respect to the cmd worker, which may
	 * run in interrupt-disabled context */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2623 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2624 u32 plen, const void *param)
2626 int len = HCI_COMMAND_HDR_SIZE + plen;
2627 struct hci_command_hdr *hdr;
2628 struct sk_buff *skb;
2630 skb = bt_skb_alloc(len, GFP_ATOMIC);
2631 if (!skb)
2632 return NULL;
2634 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2635 hdr->opcode = cpu_to_le16(opcode);
2636 hdr->plen = plen;
2638 if (plen)
2639 memcpy(skb_put(skb, plen), param, plen);
2641 BT_DBG("skb len %d", skb->len);
2643 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2644 skb->dev = (void *) hdev;
2646 return skb;
2649 /* Send HCI command */
2650 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2651 const void *param)
2653 struct sk_buff *skb;
2655 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2657 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2658 if (!skb) {
2659 BT_ERR("%s no memory for command", hdev->name);
2660 return -ENOMEM;
2663 /* Stand-alone HCI commands must be flaged as
2664 * single-command requests.
2666 bt_cb(skb)->req.start = true;
2668 skb_queue_tail(&hdev->cmd_q, skb);
2669 queue_work(hdev->workqueue, &hdev->cmd_work);
2671 return 0;
2674 /* Queue a command to an asynchronous HCI request */
2675 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2676 const void *param, u8 event)
2678 struct hci_dev *hdev = req->hdev;
2679 struct sk_buff *skb;
2681 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2683 /* If an error occured during request building, there is no point in
2684 * queueing the HCI command. We can simply return.
2686 if (req->err)
2687 return;
2689 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2690 if (!skb) {
2691 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2692 hdev->name, opcode);
2693 req->err = -ENOMEM;
2694 return;
2697 if (skb_queue_empty(&req->cmd_q))
2698 bt_cb(skb)->req.start = true;
2700 bt_cb(skb)->req.event = event;
2702 skb_queue_tail(&req->cmd_q, skb);
2705 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2706 const void *param)
2708 hci_req_add_ev(req, opcode, plen, param, 0);
2711 /* Get data from the previously sent command */
2712 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2714 struct hci_command_hdr *hdr;
2716 if (!hdev->sent_cmd)
2717 return NULL;
2719 hdr = (void *) hdev->sent_cmd->data;
2721 if (hdr->opcode != cpu_to_le16(opcode))
2722 return NULL;
2724 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2726 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2729 /* Send ACL data */
2730 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2732 struct hci_acl_hdr *hdr;
2733 int len = skb->len;
2735 skb_push(skb, HCI_ACL_HDR_SIZE);
2736 skb_reset_transport_header(skb);
2737 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2738 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2739 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to @skb (and any fragments carried in its frag_list)
 * and append everything to @queue for the TX worker. */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb to its linear part; fragments in frag_list are
	 * detached and queued as individual packets below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* Pick the handle for the ACL header: BR/EDR uses the connection
	 * handle, AMP controllers use the logical channel handle. */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		/* Unknown transport: drop silently (skb is leaked to the
		 * caller's ownership; nothing is queued) */
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Every fragment after the first is a continuation */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2801 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2803 struct hci_dev *hdev = chan->conn->hdev;
2805 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2807 skb->dev = (void *) hdev;
2809 hci_queue_acl(chan, &chan->data_q, skb, flags);
2811 queue_work(hdev->workqueue, &hdev->tx_work);
2814 /* Send SCO data */
2815 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2817 struct hci_dev *hdev = conn->hdev;
2818 struct hci_sco_hdr hdr;
2820 BT_DBG("%s len %d", hdev->name, skb->len);
2822 hdr.handle = cpu_to_le16(conn->handle);
2823 hdr.dlen = skb->len;
2825 skb_push(skb, HCI_SCO_HDR_SIZE);
2826 skb_reset_transport_header(skb);
2827 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2829 skb->dev = (void *) hdev;
2830 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2832 skb_queue_tail(&conn->data_q, skb);
2833 queue_work(hdev->workqueue, &hdev->tx_work);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler.
 *
 * Pick the connection of @type with queued data and the fewest
 * unacknowledged packets, and compute its fair share (*quote) of the
 * controller's buffer budget for that link type.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* le_mtu == 0 means no dedicated LE buffers;
			 * the ACL pool is shared instead */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the budget, but always at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2899 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2901 struct hci_conn_hash *h = &hdev->conn_hash;
2902 struct hci_conn *c;
2904 BT_ERR("%s link tx timeout", hdev->name);
2906 rcu_read_lock();
2908 /* Kill stalled connections */
2909 list_for_each_entry_rcu(c, &h->list, list) {
2910 if (c->type == type && c->sent) {
2911 BT_ERR("%s killing stalled connection %pMR",
2912 hdev->name, &c->dst);
2913 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2917 rcu_read_unlock();
/* Channel-level scheduler: pick the channel of link @type whose head
 * packet has the highest priority, breaking ties in favour of the
 * connection with the fewest unacknowledged packets, and compute its
 * fair share (*quote) of the controller buffer budget. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Only channels in the highest priority class seen
			 * so far compete for the quote */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New, higher priority class found:
				 * restart the selection within it */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within the class, prefer the connection with the
			 * fewest packets in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers: share the ACL pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among competing channels, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
/* After a scheduling round, reset the sent counter of channels that
 * were served and promote the head packet of starved channels so they
 * can compete with high-priority traffic in the next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel was served in the last round:
			 * clear its counter, leave its priority alone */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion ceiling */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump its head packet so it wins
			 * against served channels next round */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3052 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3054 /* Calculate count of blocks used by this packet */
3055 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3058 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3060 if (!test_bit(HCI_RAW, &hdev->flags)) {
3061 /* ACL tx timeout must be longer than maximum
3062 * link supervision timeout (40.9 seconds) */
3063 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3064 HCI_ACL_TX_TIMEOUT))
3065 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduling: drain channel queues while buffer
 * credits (hdev->acl_cnt) remain, respecting each channel's quote. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when the channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One buffer credit consumed; track in-flight counts */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Block-based ACL scheduling: like hci_sched_acl_pkt but accounting is
 * done in controller data blocks (hdev->block_cnt) instead of packets. */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry their traffic on AMP logical links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		/* Priority of the head packet when the channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet doesn't fit into the remaining blocks:
			 * give up this round entirely */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the consumed blocks against budget,
			 * quote, and in-flight counters alike */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3161 static void hci_sched_acl(struct hci_dev *hdev)
3163 BT_DBG("%s", hdev->name);
3165 /* No ACL link over BR/EDR controller */
3166 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3167 return;
3169 /* No AMP link over AMP controller */
3170 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3171 return;
3173 switch (hdev->flow_ctl_mode) {
3174 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3175 hci_sched_acl_pkt(hdev);
3176 break;
3178 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3179 hci_sched_acl_blk(hdev);
3180 break;
3184 /* Schedule SCO */
3185 static void hci_sched_sco(struct hci_dev *hdev)
3187 struct hci_conn *conn;
3188 struct sk_buff *skb;
3189 int quote;
3191 BT_DBG("%s", hdev->name);
3193 if (!hci_conn_num(hdev, SCO_LINK))
3194 return;
3196 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3197 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3198 BT_DBG("skb %p len %d", skb, skb->len);
3199 hci_send_frame(skb);
3201 conn->sent++;
3202 if (conn->sent == ~0)
3203 conn->sent = 0;
3208 static void hci_sched_esco(struct hci_dev *hdev)
3210 struct hci_conn *conn;
3211 struct sk_buff *skb;
3212 int quote;
3214 BT_DBG("%s", hdev->name);
3216 if (!hci_conn_num(hdev, ESCO_LINK))
3217 return;
3219 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3220 &quote))) {
3221 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3222 BT_DBG("skb %p len %d", skb, skb->len);
3223 hci_send_frame(skb);
3225 conn->sent++;
3226 if (conn->sent == ~0)
3227 conn->sent = 0;
/* LE scheduling. Works on a local copy of the buffer budget and writes
 * the remainder back to whichever pool (LE or shared ACL) was used. */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* le_pkts == 0 means no dedicated LE buffers; share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when the channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3283 static void hci_tx_work(struct work_struct *work)
3285 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3286 struct sk_buff *skb;
3288 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3289 hdev->sco_cnt, hdev->le_cnt);
3291 /* Schedule queues and send stuff to HCI driver */
3293 hci_sched_acl(hdev);
3295 hci_sched_sco(hdev);
3297 hci_sched_esco(hdev);
3299 hci_sched_le(hdev);
3301 /* Send next queued raw (unknown type) packet */
3302 while ((skb = skb_dequeue(&hdev->raw_q)))
3303 hci_send_frame(skb);
3306 /* ----- HCI RX task (incoming data processing) ----- */
3308 /* ACL data packet */
3309 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3311 struct hci_acl_hdr *hdr = (void *) skb->data;
3312 struct hci_conn *conn;
3313 __u16 handle, flags;
3315 skb_pull(skb, HCI_ACL_HDR_SIZE);
3317 handle = __le16_to_cpu(hdr->handle);
3318 flags = hci_flags(handle);
3319 handle = hci_handle(handle);
3321 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3322 handle, flags);
3324 hdev->stat.acl_rx++;
3326 hci_dev_lock(hdev);
3327 conn = hci_conn_hash_lookup_handle(hdev, handle);
3328 hci_dev_unlock(hdev);
3330 if (conn) {
3331 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3333 /* Send to upper protocol */
3334 l2cap_recv_acldata(conn, skb, flags);
3335 return;
3336 } else {
3337 BT_ERR("%s ACL packet for unknown connection handle %d",
3338 hdev->name, handle);
3341 kfree_skb(skb);
3344 /* SCO data packet */
3345 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3347 struct hci_sco_hdr *hdr = (void *) skb->data;
3348 struct hci_conn *conn;
3349 __u16 handle;
3351 skb_pull(skb, HCI_SCO_HDR_SIZE);
3353 handle = __le16_to_cpu(hdr->handle);
3355 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3357 hdev->stat.sco_rx++;
3359 hci_dev_lock(hdev);
3360 conn = hci_conn_hash_lookup_handle(hdev, handle);
3361 hci_dev_unlock(hdev);
3363 if (conn) {
3364 /* Send to upper protocol */
3365 sco_recv_scodata(conn, skb);
3366 return;
3367 } else {
3368 BT_ERR("%s SCO packet for unknown connection handle %d",
3369 hdev->name, handle);
3372 kfree_skb(skb);
3375 static bool hci_req_is_complete(struct hci_dev *hdev)
3377 struct sk_buff *skb;
3379 skb = skb_peek(&hdev->cmd_q);
3380 if (!skb)
3381 return true;
3383 return bt_cb(skb)->req.start;
3386 static void hci_resend_last(struct hci_dev *hdev)
3388 struct hci_command_hdr *sent;
3389 struct sk_buff *skb;
3390 u16 opcode;
3392 if (!hdev->sent_cmd)
3393 return;
3395 sent = (void *) hdev->sent_cmd->data;
3396 opcode = __le16_to_cpu(sent->opcode);
3397 if (opcode == HCI_OP_RESET)
3398 return;
3400 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3401 if (!skb)
3402 return;
3404 skb_queue_head(&hdev->cmd_q, skb);
3405 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the asynchronous
 * request it belongs to is finished and, if so, invoke the request's
 * completion callback with @status. */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Head of the next request reached: put it back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last freed command of the request carries the
		 * completion callback */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
/* RX worker: drain hdev->rx_q, copying packets to monitor/sockets and
 * dispatching them to the event, ACL, or SCO handlers. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode userspace owns the device; the stack does
		 * not process anything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
/* Command worker: transmit the next queued HCI command when the
 * controller has a free command slot (cmd_cnt > 0). */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before replacing it */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command can be matched against its
		 * completion event (and possibly resent) later */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset the command timer is suppressed;
			 * otherwise (re)arm the command timeout */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
3553 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3555 /* General inquiry access code (GIAC) */
3556 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3557 struct hci_cp_inquiry cp;
3559 BT_DBG("%s", hdev->name);
3561 if (test_bit(HCI_INQUIRY, &hdev->flags))
3562 return -EINPROGRESS;
3564 inquiry_cache_flush(hdev);
3566 memset(&cp, 0, sizeof(cp));
3567 memcpy(&cp.lap, lap, sizeof(cp.lap));
3568 cp.length = length;
3570 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3573 int hci_cancel_inquiry(struct hci_dev *hdev)
3575 BT_DBG("%s", hdev->name);
3577 if (!test_bit(HCI_INQUIRY, &hdev->flags))
3578 return -EALREADY;
3580 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3583 u8 bdaddr_to_le(u8 bdaddr_type)
3585 switch (bdaddr_type) {
3586 case BDADDR_LE_PUBLIC:
3587 return ADDR_LE_DEV_PUBLIC;
3589 default:
3590 /* Fallback to LE Random address type */
3591 return ADDR_LE_DEV_RANDOM;