net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
89 hci_dev_lock(hdev);
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
94 hci_dev_unlock(hdev);
96 if (!skb)
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
132 failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
175 default:
176 err = -ETIMEDOUT;
177 break;
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
184 if (err < 0)
185 return ERR_PTR(err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
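/* Illustrative usage of __hci_cmd_sync() (a sketch, not part of this file):
 * a code path that already holds hdev->req_lock can send a single command
 * and wait for its Command Complete parameters. The vendor opcode 0xfc01
 * and the parameter byte are made-up example values.
 */
static int example_vendor_cmd(struct hci_dev *hdev)
{
	u8 param = 0x01;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now points at the Command Complete return parameters */
	kfree_skb(skb);

	return 0;
}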
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
201 unsigned long opt),
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
214 func(&req, opt);
216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
225 if (err == -ENODATA)
226 return 0;
228 return err;
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
239 return -EINTR;
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
243 err = -bt_to_errno(hdev->req_result);
244 break;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
250 default:
251 err = -ETIMEDOUT;
252 break;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
259 return err;
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
264 unsigned long opt),
265 unsigned long opt, __u32 timeout)
267 int ret;
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
272 /* Serialize all requests */
273 hci_req_lock(hdev);
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
277 return ret;
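/* Illustrative pattern for callers of hci_req_sync() (a sketch, not part of
 * this file): a builder callback queues one or more commands on the request,
 * and hci_req_sync() runs them and waits for completion. The callback name
 * below is hypothetical; the opcode is one already used in this file.
 */
static void example_read_name_req(struct hci_request *req, unsigned long opt)
{
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

/* ...and, from process context while the device is up:
 *
 *	err = hci_req_sync(hdev, example_read_name_req, 0, HCI_INIT_TIMEOUT);
 */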
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
284 /* Reset device */
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
328 case HCI_BREDR:
329 bredr_init(req);
330 break;
332 case HCI_AMP:
333 amp_init(req);
334 break;
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
342 static void bredr_setup(struct hci_request *req)
344 __le16 param;
345 __u8 flt_type;
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
423 return 0x00;
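/* For reference: the value chosen above becomes the Write Inquiry Mode
 * parameter, where 0x00 requests standard inquiry results, 0x01 results
 * with RSSI, and 0x02 extended inquiry results.
 */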
426 static void hci_setup_inquiry_mode(struct hci_request *req)
428 u8 mode;
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 * command otherwise.
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 return;
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
457 } else {
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
510 events[0] = 0x1f;
511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
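/* Note: the LE event mask 0x1f written above enables the first five LE
 * meta events (LE Connection Complete, LE Advertising Report, LE Connection
 * Update Complete, LE Read Remote Used Features Complete and LE Long Term
 * Key Request).
 */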
516 static void hci_init2_req(struct hci_request *req, unsigned long opt)
518 struct hci_dev *hdev = req->hdev;
520 if (lmp_bredr_capable(hdev))
521 bredr_setup(req);
523 if (lmp_le_capable(hdev))
524 le_setup(req);
526 hci_setup_event_mask(req);
528 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
529 * local supported commands HCI command.
531 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
532 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534 if (lmp_ssp_capable(hdev)) {
535 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
536 u8 mode = 0x01;
537 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
538 sizeof(mode), &mode);
539 } else {
540 struct hci_cp_write_eir cp;
542 memset(hdev->eir, 0, sizeof(hdev->eir));
543 memset(&cp, 0, sizeof(cp));
545 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
549 if (lmp_inq_rssi_capable(hdev))
550 hci_setup_inquiry_mode(req);
552 if (lmp_inq_tx_pwr_capable(hdev))
553 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
555 if (lmp_ext_feat_capable(hdev)) {
556 struct hci_cp_read_local_ext_features cp;
558 cp.page = 0x01;
559 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
560 sizeof(cp), &cp);
563 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
564 u8 enable = 1;
565 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
566 &enable);
570 static void hci_setup_link_policy(struct hci_request *req)
572 struct hci_dev *hdev = req->hdev;
573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
585 cp.policy = cpu_to_le16(link_policy);
586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
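/* Worked example: a controller advertising role switch and sniff mode (but
 * neither hold nor park) ends up with link_policy 0x0001 | 0x0004, so
 * cp.policy is written as cpu_to_le16(0x0005).
 */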
589 static void hci_set_le_support(struct hci_request *req)
591 struct hci_dev *hdev = req->hdev;
592 struct hci_cp_write_le_host_supported cp;
594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
596 return;
598 memset(&cp, 0, sizeof(cp));
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
601 cp.le = 0x01;
602 cp.simul = lmp_le_br_capable(hdev);
605 if (cp.le != lmp_host_le_capable(hdev))
606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
607 &cp);
610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
612 struct hci_dev *hdev = req->hdev;
613 u8 p;
615 /* Some Broadcom based Bluetooth controllers do not support the
616 * Delete Stored Link Key command. They are clearly indicating its
617 * absence in the bit mask of supported commands.
619 * Check the supported commands and send it only if the command is marked
620 * as supported. If not supported, assume that the controller
621 * does not have actual support for stored link keys which makes this
622 * command redundant anyway.
624 if (hdev->commands[6] & 0x80) {
625 struct hci_cp_delete_stored_link_key cp;
627 bacpy(&cp.bdaddr, BDADDR_ANY);
628 cp.delete_all = 0x01;
629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
630 sizeof(cp), &cp);
633 if (hdev->commands[5] & 0x10)
634 hci_setup_link_policy(req);
636 if (lmp_le_capable(hdev)) {
637 hci_set_le_support(req);
638 hci_update_ad(req);
641 /* Read features beyond page 1 if available */
642 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
643 struct hci_cp_read_local_ext_features cp;
645 cp.page = p;
646 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
647 sizeof(cp), &cp);
651 static int __hci_init(struct hci_dev *hdev)
653 int err;
655 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
656 if (err < 0)
657 return err;
659 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
660 * BR/EDR/LE type controllers. AMP controllers only need the
661 * first stage init.
663 if (hdev->dev_type != HCI_BREDR)
664 return 0;
666 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
667 if (err < 0)
668 return err;
670 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
673 static void hci_scan_req(struct hci_request *req, unsigned long opt)
675 __u8 scan = opt;
677 BT_DBG("%s %x", req->hdev->name, scan);
679 /* Inquiry and Page scans */
680 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
683 static void hci_auth_req(struct hci_request *req, unsigned long opt)
685 __u8 auth = opt;
687 BT_DBG("%s %x", req->hdev->name, auth);
689 /* Authentication */
690 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
693 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
695 __u8 encrypt = opt;
697 BT_DBG("%s %x", req->hdev->name, encrypt);
699 /* Encryption */
700 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
703 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
705 __le16 policy = cpu_to_le16(opt);
707 BT_DBG("%s %x", req->hdev->name, policy);
709 /* Default link policy */
710 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
713 /* Get HCI device by index.
714 * Device is held on return. */
715 struct hci_dev *hci_dev_get(int index)
717 struct hci_dev *hdev = NULL, *d;
719 BT_DBG("%d", index);
721 if (index < 0)
722 return NULL;
724 read_lock(&hci_dev_list_lock);
725 list_for_each_entry(d, &hci_dev_list, list) {
726 if (d->id == index) {
727 hdev = hci_dev_hold(d);
728 break;
731 read_unlock(&hci_dev_list_lock);
732 return hdev;
735 /* ---- Inquiry support ---- */
737 bool hci_discovery_active(struct hci_dev *hdev)
739 struct discovery_state *discov = &hdev->discovery;
741 switch (discov->state) {
742 case DISCOVERY_FINDING:
743 case DISCOVERY_RESOLVING:
744 return true;
746 default:
747 return false;
751 void hci_discovery_set_state(struct hci_dev *hdev, int state)
753 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
755 if (hdev->discovery.state == state)
756 return;
758 switch (state) {
759 case DISCOVERY_STOPPED:
760 if (hdev->discovery.state != DISCOVERY_STARTING)
761 mgmt_discovering(hdev, 0);
762 break;
763 case DISCOVERY_STARTING:
764 break;
765 case DISCOVERY_FINDING:
766 mgmt_discovering(hdev, 1);
767 break;
768 case DISCOVERY_RESOLVING:
769 break;
770 case DISCOVERY_STOPPING:
771 break;
774 hdev->discovery.state = state;
777 void hci_inquiry_cache_flush(struct hci_dev *hdev)
779 struct discovery_state *cache = &hdev->discovery;
780 struct inquiry_entry *p, *n;
782 list_for_each_entry_safe(p, n, &cache->all, all) {
783 list_del(&p->all);
784 kfree(p);
787 INIT_LIST_HEAD(&cache->unknown);
788 INIT_LIST_HEAD(&cache->resolve);
791 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
792 bdaddr_t *bdaddr)
794 struct discovery_state *cache = &hdev->discovery;
795 struct inquiry_entry *e;
797 BT_DBG("cache %p, %pMR", cache, bdaddr);
799 list_for_each_entry(e, &cache->all, all) {
800 if (!bacmp(&e->data.bdaddr, bdaddr))
801 return e;
804 return NULL;
807 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
808 bdaddr_t *bdaddr)
810 struct discovery_state *cache = &hdev->discovery;
811 struct inquiry_entry *e;
813 BT_DBG("cache %p, %pMR", cache, bdaddr);
815 list_for_each_entry(e, &cache->unknown, list) {
816 if (!bacmp(&e->data.bdaddr, bdaddr))
817 return e;
820 return NULL;
823 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
824 bdaddr_t *bdaddr,
825 int state)
827 struct discovery_state *cache = &hdev->discovery;
828 struct inquiry_entry *e;
830 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
832 list_for_each_entry(e, &cache->resolve, list) {
833 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
834 return e;
835 if (!bacmp(&e->data.bdaddr, bdaddr))
836 return e;
839 return NULL;
842 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
843 struct inquiry_entry *ie)
845 struct discovery_state *cache = &hdev->discovery;
846 struct list_head *pos = &cache->resolve;
847 struct inquiry_entry *p;
849 list_del(&ie->list);
851 list_for_each_entry(p, &cache->resolve, list) {
852 if (p->name_state != NAME_PENDING &&
853 abs(p->data.rssi) >= abs(ie->data.rssi))
854 break;
855 pos = &p->list;
858 list_add(&ie->list, pos);
861 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
862 bool name_known, bool *ssp)
864 struct discovery_state *cache = &hdev->discovery;
865 struct inquiry_entry *ie;
867 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
869 hci_remove_remote_oob_data(hdev, &data->bdaddr);
871 if (ssp)
872 *ssp = data->ssp_mode;
874 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
875 if (ie) {
876 if (ie->data.ssp_mode && ssp)
877 *ssp = true;
879 if (ie->name_state == NAME_NEEDED &&
880 data->rssi != ie->data.rssi) {
881 ie->data.rssi = data->rssi;
882 hci_inquiry_cache_update_resolve(hdev, ie);
885 goto update;
888 /* Entry not in the cache. Add new one. */
889 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
890 if (!ie)
891 return false;
893 list_add(&ie->all, &cache->all);
895 if (name_known) {
896 ie->name_state = NAME_KNOWN;
897 } else {
898 ie->name_state = NAME_NOT_KNOWN;
899 list_add(&ie->list, &cache->unknown);
902 update:
903 if (name_known && ie->name_state != NAME_KNOWN &&
904 ie->name_state != NAME_PENDING) {
905 ie->name_state = NAME_KNOWN;
906 list_del(&ie->list);
909 memcpy(&ie->data, data, sizeof(*data));
910 ie->timestamp = jiffies;
911 cache->timestamp = jiffies;
913 if (ie->name_state == NAME_NOT_KNOWN)
914 return false;
916 return true;
919 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
921 struct discovery_state *cache = &hdev->discovery;
922 struct inquiry_info *info = (struct inquiry_info *) buf;
923 struct inquiry_entry *e;
924 int copied = 0;
926 list_for_each_entry(e, &cache->all, all) {
927 struct inquiry_data *data = &e->data;
929 if (copied >= num)
930 break;
932 bacpy(&info->bdaddr, &data->bdaddr);
933 info->pscan_rep_mode = data->pscan_rep_mode;
934 info->pscan_period_mode = data->pscan_period_mode;
935 info->pscan_mode = data->pscan_mode;
936 memcpy(info->dev_class, data->dev_class, 3);
937 info->clock_offset = data->clock_offset;
939 info++;
940 copied++;
943 BT_DBG("cache %p, copied %d", cache, copied);
944 return copied;
947 static void hci_inq_req(struct hci_request *req, unsigned long opt)
949 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
950 struct hci_dev *hdev = req->hdev;
951 struct hci_cp_inquiry cp;
953 BT_DBG("%s", hdev->name);
955 if (test_bit(HCI_INQUIRY, &hdev->flags))
956 return;
958 /* Start Inquiry */
959 memcpy(&cp.lap, &ir->lap, 3);
960 cp.length = ir->length;
961 cp.num_rsp = ir->num_rsp;
962 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
965 static int wait_inquiry(void *word)
967 schedule();
968 return signal_pending(current);
971 int hci_inquiry(void __user *arg)
973 __u8 __user *ptr = arg;
974 struct hci_inquiry_req ir;
975 struct hci_dev *hdev;
976 int err = 0, do_inquiry = 0, max_rsp;
977 long timeo;
978 __u8 *buf;
980 if (copy_from_user(&ir, ptr, sizeof(ir)))
981 return -EFAULT;
983 hdev = hci_dev_get(ir.dev_id);
984 if (!hdev)
985 return -ENODEV;
987 hci_dev_lock(hdev);
988 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
989 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
990 hci_inquiry_cache_flush(hdev);
991 do_inquiry = 1;
993 hci_dev_unlock(hdev);
995 timeo = ir.length * msecs_to_jiffies(2000);
997 if (do_inquiry) {
998 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
999 timeo);
1000 if (err < 0)
1001 goto done;
1003 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1004 * cleared). If it is interrupted by a signal, return -EINTR.
1006 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1007 TASK_INTERRUPTIBLE))
1008 return -EINTR;
1011 /* For an unlimited number of responses we will use a buffer with
1012 * 255 entries
1014 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1016 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
1017 * copy it to user space.
1019 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1020 if (!buf) {
1021 err = -ENOMEM;
1022 goto done;
1025 hci_dev_lock(hdev);
1026 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1027 hci_dev_unlock(hdev);
1029 BT_DBG("num_rsp %d", ir.num_rsp);
1031 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1032 ptr += sizeof(ir);
1033 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1034 ir.num_rsp))
1035 err = -EFAULT;
1036 } else
1037 err = -EFAULT;
1039 kfree(buf);
1041 done:
1042 hci_dev_put(hdev);
1043 return err;
1046 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1048 u8 ad_len = 0, flags = 0;
1049 size_t name_len;
1051 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1052 flags |= LE_AD_GENERAL;
1054 if (!lmp_bredr_capable(hdev))
1055 flags |= LE_AD_NO_BREDR;
1057 if (lmp_le_br_capable(hdev))
1058 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1060 if (lmp_host_le_br_capable(hdev))
1061 flags |= LE_AD_SIM_LE_BREDR_HOST;
1063 if (flags) {
1064 BT_DBG("adv flags 0x%02x", flags);
1066 ptr[0] = 2;
1067 ptr[1] = EIR_FLAGS;
1068 ptr[2] = flags;
1070 ad_len += 3;
1071 ptr += 3;
1074 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1075 ptr[0] = 2;
1076 ptr[1] = EIR_TX_POWER;
1077 ptr[2] = (u8) hdev->adv_tx_power;
1079 ad_len += 3;
1080 ptr += 3;
1083 name_len = strlen(hdev->dev_name);
1084 if (name_len > 0) {
1085 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1087 if (name_len > max_len) {
1088 name_len = max_len;
1089 ptr[1] = EIR_NAME_SHORT;
1090 } else
1091 ptr[1] = EIR_NAME_COMPLETE;
1093 ptr[0] = name_len + 1;
1095 memcpy(ptr + 2, hdev->dev_name, name_len);
1097 ad_len += (name_len + 2);
1098 ptr += (name_len + 2);
1101 return ad_len;
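/* Illustrative output of create_ad() for a hypothetical LE-only controller
 * acting as a peripheral, named "kbd", with advertising tx power 0 dBm:
 * each field is a length byte, an AD type byte and the value, so the buffer
 * would hold
 *
 *	02 01 06	Flags (LE General Discoverable, BR/EDR not supported)
 *	02 0a 00	TX Power Level (0 dBm)
 *	04 09 6b 62 64	Complete Local Name "kbd"
 *
 * and the function would return ad_len = 11.
 */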
1104 void hci_update_ad(struct hci_request *req)
1106 struct hci_dev *hdev = req->hdev;
1107 struct hci_cp_le_set_adv_data cp;
1108 u8 len;
1110 if (!lmp_le_capable(hdev))
1111 return;
1113 memset(&cp, 0, sizeof(cp));
1115 len = create_ad(hdev, cp.data);
1117 if (hdev->adv_data_len == len &&
1118 memcmp(cp.data, hdev->adv_data, len) == 0)
1119 return;
1121 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1122 hdev->adv_data_len = len;
1124 cp.length = len;
1126 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1129 /* ---- HCI ioctl helpers ---- */
1131 int hci_dev_open(__u16 dev)
1133 struct hci_dev *hdev;
1134 int ret = 0;
1136 hdev = hci_dev_get(dev);
1137 if (!hdev)
1138 return -ENODEV;
1140 BT_DBG("%s %p", hdev->name, hdev);
1142 hci_req_lock(hdev);
1144 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1145 ret = -ENODEV;
1146 goto done;
1149 /* Check for rfkill but allow the HCI setup stage to proceed
1150 * (which in itself doesn't cause any RF activity).
1152 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1153 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1154 ret = -ERFKILL;
1155 goto done;
1158 if (test_bit(HCI_UP, &hdev->flags)) {
1159 ret = -EALREADY;
1160 goto done;
1163 if (hdev->open(hdev)) {
1164 ret = -EIO;
1165 goto done;
1168 atomic_set(&hdev->cmd_cnt, 1);
1169 set_bit(HCI_INIT, &hdev->flags);
1171 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1172 ret = hdev->setup(hdev);
1174 if (!ret) {
1175 /* Treat all non BR/EDR controllers as raw devices if
1176 * enable_hs is not set.
1178 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1179 set_bit(HCI_RAW, &hdev->flags);
1181 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1182 set_bit(HCI_RAW, &hdev->flags);
1184 if (!test_bit(HCI_RAW, &hdev->flags))
1185 ret = __hci_init(hdev);
1188 clear_bit(HCI_INIT, &hdev->flags);
1190 if (!ret) {
1191 hci_dev_hold(hdev);
1192 set_bit(HCI_UP, &hdev->flags);
1193 hci_notify(hdev, HCI_DEV_UP);
1194 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1195 mgmt_valid_hdev(hdev)) {
1196 hci_dev_lock(hdev);
1197 mgmt_powered(hdev, 1);
1198 hci_dev_unlock(hdev);
1200 } else {
1201 /* Init failed, cleanup */
1202 flush_work(&hdev->tx_work);
1203 flush_work(&hdev->cmd_work);
1204 flush_work(&hdev->rx_work);
1206 skb_queue_purge(&hdev->cmd_q);
1207 skb_queue_purge(&hdev->rx_q);
1209 if (hdev->flush)
1210 hdev->flush(hdev);
1212 if (hdev->sent_cmd) {
1213 kfree_skb(hdev->sent_cmd);
1214 hdev->sent_cmd = NULL;
1217 hdev->close(hdev);
1218 hdev->flags = 0;
1221 done:
1222 hci_req_unlock(hdev);
1223 hci_dev_put(hdev);
1224 return ret;
1227 static int hci_dev_do_close(struct hci_dev *hdev)
1229 BT_DBG("%s %p", hdev->name, hdev);
1231 cancel_delayed_work(&hdev->power_off);
1233 hci_req_cancel(hdev, ENODEV);
1234 hci_req_lock(hdev);
1236 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1237 del_timer_sync(&hdev->cmd_timer);
1238 hci_req_unlock(hdev);
1239 return 0;
1242 /* Flush RX and TX works */
1243 flush_work(&hdev->tx_work);
1244 flush_work(&hdev->rx_work);
1246 if (hdev->discov_timeout > 0) {
1247 cancel_delayed_work(&hdev->discov_off);
1248 hdev->discov_timeout = 0;
1249 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1252 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1253 cancel_delayed_work(&hdev->service_cache);
1255 cancel_delayed_work_sync(&hdev->le_scan_disable);
1257 hci_dev_lock(hdev);
1258 hci_inquiry_cache_flush(hdev);
1259 hci_conn_hash_flush(hdev);
1260 hci_dev_unlock(hdev);
1262 hci_notify(hdev, HCI_DEV_DOWN);
1264 if (hdev->flush)
1265 hdev->flush(hdev);
1267 /* Reset device */
1268 skb_queue_purge(&hdev->cmd_q);
1269 atomic_set(&hdev->cmd_cnt, 1);
1270 if (!test_bit(HCI_RAW, &hdev->flags) &&
1271 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1272 set_bit(HCI_INIT, &hdev->flags);
1273 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1274 clear_bit(HCI_INIT, &hdev->flags);
1277 /* flush cmd work */
1278 flush_work(&hdev->cmd_work);
1280 /* Drop queues */
1281 skb_queue_purge(&hdev->rx_q);
1282 skb_queue_purge(&hdev->cmd_q);
1283 skb_queue_purge(&hdev->raw_q);
1285 /* Drop last sent command */
1286 if (hdev->sent_cmd) {
1287 del_timer_sync(&hdev->cmd_timer);
1288 kfree_skb(hdev->sent_cmd);
1289 hdev->sent_cmd = NULL;
1292 kfree_skb(hdev->recv_evt);
1293 hdev->recv_evt = NULL;
1295 /* After this point our queues are empty
1296 * and no tasks are scheduled. */
1297 hdev->close(hdev);
1299 /* Clear flags */
1300 hdev->flags = 0;
1301 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1303 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1304 mgmt_valid_hdev(hdev)) {
1305 hci_dev_lock(hdev);
1306 mgmt_powered(hdev, 0);
1307 hci_dev_unlock(hdev);
1310 /* Controller radio is available but is currently powered down */
1311 hdev->amp_status = 0;
1313 memset(hdev->eir, 0, sizeof(hdev->eir));
1314 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1316 hci_req_unlock(hdev);
1318 hci_dev_put(hdev);
1319 return 0;
1322 int hci_dev_close(__u16 dev)
1324 struct hci_dev *hdev;
1325 int err;
1327 hdev = hci_dev_get(dev);
1328 if (!hdev)
1329 return -ENODEV;
1331 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1332 cancel_delayed_work(&hdev->power_off);
1334 err = hci_dev_do_close(hdev);
1336 hci_dev_put(hdev);
1337 return err;
1340 int hci_dev_reset(__u16 dev)
1342 struct hci_dev *hdev;
1343 int ret = 0;
1345 hdev = hci_dev_get(dev);
1346 if (!hdev)
1347 return -ENODEV;
1349 hci_req_lock(hdev);
1351 if (!test_bit(HCI_UP, &hdev->flags))
1352 goto done;
1354 /* Drop queues */
1355 skb_queue_purge(&hdev->rx_q);
1356 skb_queue_purge(&hdev->cmd_q);
1358 hci_dev_lock(hdev);
1359 hci_inquiry_cache_flush(hdev);
1360 hci_conn_hash_flush(hdev);
1361 hci_dev_unlock(hdev);
1363 if (hdev->flush)
1364 hdev->flush(hdev);
1366 atomic_set(&hdev->cmd_cnt, 1);
1367 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1369 if (!test_bit(HCI_RAW, &hdev->flags))
1370 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1372 done:
1373 hci_req_unlock(hdev);
1374 hci_dev_put(hdev);
1375 return ret;
1378 int hci_dev_reset_stat(__u16 dev)
1380 struct hci_dev *hdev;
1381 int ret = 0;
1383 hdev = hci_dev_get(dev);
1384 if (!hdev)
1385 return -ENODEV;
1387 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1389 hci_dev_put(hdev);
1391 return ret;
1394 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1396 struct hci_dev *hdev;
1397 struct hci_dev_req dr;
1398 int err = 0;
1400 if (copy_from_user(&dr, arg, sizeof(dr)))
1401 return -EFAULT;
1403 hdev = hci_dev_get(dr.dev_id);
1404 if (!hdev)
1405 return -ENODEV;
1407 switch (cmd) {
1408 case HCISETAUTH:
1409 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1410 HCI_INIT_TIMEOUT);
1411 break;
1413 case HCISETENCRYPT:
1414 if (!lmp_encrypt_capable(hdev)) {
1415 err = -EOPNOTSUPP;
1416 break;
1419 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1420 /* Auth must be enabled first */
1421 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1422 HCI_INIT_TIMEOUT);
1423 if (err)
1424 break;
1427 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1428 HCI_INIT_TIMEOUT);
1429 break;
1431 case HCISETSCAN:
1432 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1433 HCI_INIT_TIMEOUT);
1434 break;
1436 case HCISETLINKPOL:
1437 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1438 HCI_INIT_TIMEOUT);
1439 break;
1441 case HCISETLINKMODE:
1442 hdev->link_mode = ((__u16) dr.dev_opt) &
1443 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1444 break;
1446 case HCISETPTYPE:
1447 hdev->pkt_type = (__u16) dr.dev_opt;
1448 break;
1450 case HCISETACLMTU:
1451 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1452 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1453 break;
1455 case HCISETSCOMTU:
1456 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1457 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1458 break;
1460 default:
1461 err = -EINVAL;
1462 break;
1465 hci_dev_put(hdev);
1466 return err;
1469 int hci_get_dev_list(void __user *arg)
1471 struct hci_dev *hdev;
1472 struct hci_dev_list_req *dl;
1473 struct hci_dev_req *dr;
1474 int n = 0, size, err;
1475 __u16 dev_num;
1477 if (get_user(dev_num, (__u16 __user *) arg))
1478 return -EFAULT;
1480 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1481 return -EINVAL;
1483 size = sizeof(*dl) + dev_num * sizeof(*dr);
1485 dl = kzalloc(size, GFP_KERNEL);
1486 if (!dl)
1487 return -ENOMEM;
1489 dr = dl->dev_req;
1491 read_lock(&hci_dev_list_lock);
1492 list_for_each_entry(hdev, &hci_dev_list, list) {
1493 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1494 cancel_delayed_work(&hdev->power_off);
1496 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1497 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1499 (dr + n)->dev_id = hdev->id;
1500 (dr + n)->dev_opt = hdev->flags;
1502 if (++n >= dev_num)
1503 break;
1505 read_unlock(&hci_dev_list_lock);
1507 dl->dev_num = n;
1508 size = sizeof(*dl) + n * sizeof(*dr);
1510 err = copy_to_user(arg, dl, size);
1511 kfree(dl);
1513 return err ? -EFAULT : 0;
1516 int hci_get_dev_info(void __user *arg)
1518 struct hci_dev *hdev;
1519 struct hci_dev_info di;
1520 int err = 0;
1522 if (copy_from_user(&di, arg, sizeof(di)))
1523 return -EFAULT;
1525 hdev = hci_dev_get(di.dev_id);
1526 if (!hdev)
1527 return -ENODEV;
1529 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1530 cancel_delayed_work_sync(&hdev->power_off);
1532 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1533 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1535 strcpy(di.name, hdev->name);
1536 di.bdaddr = hdev->bdaddr;
1537 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1538 di.flags = hdev->flags;
1539 di.pkt_type = hdev->pkt_type;
1540 if (lmp_bredr_capable(hdev)) {
1541 di.acl_mtu = hdev->acl_mtu;
1542 di.acl_pkts = hdev->acl_pkts;
1543 di.sco_mtu = hdev->sco_mtu;
1544 di.sco_pkts = hdev->sco_pkts;
1545 } else {
1546 di.acl_mtu = hdev->le_mtu;
1547 di.acl_pkts = hdev->le_pkts;
1548 di.sco_mtu = 0;
1549 di.sco_pkts = 0;
1551 di.link_policy = hdev->link_policy;
1552 di.link_mode = hdev->link_mode;
1554 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1555 memcpy(&di.features, &hdev->features, sizeof(di.features));
1557 if (copy_to_user(arg, &di, sizeof(di)))
1558 err = -EFAULT;
1560 hci_dev_put(hdev);
1562 return err;
1565 /* ---- Interface to HCI drivers ---- */
1567 static int hci_rfkill_set_block(void *data, bool blocked)
1569 struct hci_dev *hdev = data;
1571 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1573 if (blocked) {
1574 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1575 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1576 hci_dev_do_close(hdev);
1577 } else {
1578 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1581 return 0;
1584 static const struct rfkill_ops hci_rfkill_ops = {
1585 .set_block = hci_rfkill_set_block,
1588 static void hci_power_on(struct work_struct *work)
1590 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1591 int err;
1593 BT_DBG("%s", hdev->name);
1595 err = hci_dev_open(hdev->id);
1596 if (err < 0) {
1597 mgmt_set_powered_failed(hdev, err);
1598 return;
1601 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1602 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1603 hci_dev_do_close(hdev);
1604 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1605 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1606 HCI_AUTO_OFF_TIMEOUT);
1609 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1610 mgmt_index_added(hdev);
1613 static void hci_power_off(struct work_struct *work)
1615 struct hci_dev *hdev = container_of(work, struct hci_dev,
1616 power_off.work);
1618 BT_DBG("%s", hdev->name);
1620 hci_dev_do_close(hdev);
1623 static void hci_discov_off(struct work_struct *work)
1625 struct hci_dev *hdev;
1626 u8 scan = SCAN_PAGE;
1628 hdev = container_of(work, struct hci_dev, discov_off.work);
1630 BT_DBG("%s", hdev->name);
1632 hci_dev_lock(hdev);
1634 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1636 hdev->discov_timeout = 0;
1638 hci_dev_unlock(hdev);
1641 int hci_uuids_clear(struct hci_dev *hdev)
1643 struct bt_uuid *uuid, *tmp;
1645 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1646 list_del(&uuid->list);
1647 kfree(uuid);
1650 return 0;
1653 int hci_link_keys_clear(struct hci_dev *hdev)
1655 struct list_head *p, *n;
1657 list_for_each_safe(p, n, &hdev->link_keys) {
1658 struct link_key *key;
1660 key = list_entry(p, struct link_key, list);
1662 list_del(p);
1663 kfree(key);
1666 return 0;
1669 int hci_smp_ltks_clear(struct hci_dev *hdev)
1671 struct smp_ltk *k, *tmp;
1673 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1674 list_del(&k->list);
1675 kfree(k);
1678 return 0;
1681 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1683 struct link_key *k;
1685 list_for_each_entry(k, &hdev->link_keys, list)
1686 if (bacmp(bdaddr, &k->bdaddr) == 0)
1687 return k;
1689 return NULL;
1692 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1693 u8 key_type, u8 old_key_type)
1695 /* Legacy key */
1696 if (key_type < 0x03)
1697 return true;
1699 /* Debug keys are insecure so don't store them persistently */
1700 if (key_type == HCI_LK_DEBUG_COMBINATION)
1701 return false;
1703 /* Changed combination key and there's no previous one */
1704 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1705 return false;
1707 /* Security mode 3 case */
1708 if (!conn)
1709 return true;
1711 /* Neither local nor remote side had no-bonding as requirement */
1712 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1713 return true;
1715 /* Local side had dedicated bonding as requirement */
1716 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1717 return true;
1719 /* Remote side had dedicated bonding as requirement */
1720 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1721 return true;
1723 /* If none of the above criteria match, then don't store the key
1724 * persistently */
1725 return false;
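/* Worked example of the rules above: an unauthenticated combination key
 * (type 0x04) created while either side required dedicated bonding
 * (auth_type 0x02 or 0x03) is stored persistently, whereas a debug
 * combination key (type 0x03) is never stored, regardless of the bonding
 * requirements.
 */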
1728 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1730 struct smp_ltk *k;
1732 list_for_each_entry(k, &hdev->long_term_keys, list) {
1733 if (k->ediv != ediv ||
1734 memcmp(rand, k->rand, sizeof(k->rand)))
1735 continue;
1737 return k;
1740 return NULL;
1743 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1744 u8 addr_type)
1746 struct smp_ltk *k;
1748 list_for_each_entry(k, &hdev->long_term_keys, list)
1749 if (addr_type == k->bdaddr_type &&
1750 bacmp(bdaddr, &k->bdaddr) == 0)
1751 return k;
1753 return NULL;
1756 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1757 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1759 struct link_key *key, *old_key;
1760 u8 old_key_type;
1761 bool persistent;
1763 old_key = hci_find_link_key(hdev, bdaddr);
1764 if (old_key) {
1765 old_key_type = old_key->type;
1766 key = old_key;
1767 } else {
1768 old_key_type = conn ? conn->key_type : 0xff;
1769 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1770 if (!key)
1771 return -ENOMEM;
1772 list_add(&key->list, &hdev->link_keys);
1775 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1777 /* Some buggy controller combinations generate a changed
1778 * combination key for legacy pairing even when there's no
1779 * previous key */
1780 if (type == HCI_LK_CHANGED_COMBINATION &&
1781 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1782 type = HCI_LK_COMBINATION;
1783 if (conn)
1784 conn->key_type = type;
1787 bacpy(&key->bdaddr, bdaddr);
1788 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1789 key->pin_len = pin_len;
1791 if (type == HCI_LK_CHANGED_COMBINATION)
1792 key->type = old_key_type;
1793 else
1794 key->type = type;
1796 if (!new_key)
1797 return 0;
1799 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1801 mgmt_new_link_key(hdev, key, persistent);
1803 if (conn)
1804 conn->flush_key = !persistent;
1806 return 0;
1809 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1810 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1811 ediv, u8 rand[8])
1813 struct smp_ltk *key, *old_key;
1815 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1816 return 0;
1818 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1819 if (old_key)
1820 key = old_key;
1821 else {
1822 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1823 if (!key)
1824 return -ENOMEM;
1825 list_add(&key->list, &hdev->long_term_keys);
1828 bacpy(&key->bdaddr, bdaddr);
1829 key->bdaddr_type = addr_type;
1830 memcpy(key->val, tk, sizeof(key->val));
1831 key->authenticated = authenticated;
1832 key->ediv = ediv;
1833 key->enc_size = enc_size;
1834 key->type = type;
1835 memcpy(key->rand, rand, sizeof(key->rand));
1837 if (!new_key)
1838 return 0;
1840 if (type & HCI_SMP_LTK)
1841 mgmt_new_ltk(hdev, key, 1);
1843 return 0;
1846 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1848 struct link_key *key;
1850 key = hci_find_link_key(hdev, bdaddr);
1851 if (!key)
1852 return -ENOENT;
1854 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1856 list_del(&key->list);
1857 kfree(key);
1859 return 0;
1862 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1864 struct smp_ltk *k, *tmp;
1866 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1867 if (bacmp(bdaddr, &k->bdaddr))
1868 continue;
1870 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1872 list_del(&k->list);
1873 kfree(k);
1876 return 0;
1879 /* HCI command timer function */
1880 static void hci_cmd_timeout(unsigned long arg)
1882 struct hci_dev *hdev = (void *) arg;
1884 if (hdev->sent_cmd) {
1885 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1886 u16 opcode = __le16_to_cpu(sent->opcode);
1888 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1889 } else {
1890 BT_ERR("%s command tx timeout", hdev->name);
1893 atomic_set(&hdev->cmd_cnt, 1);
1894 queue_work(hdev->workqueue, &hdev->cmd_work);
1897 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1898 bdaddr_t *bdaddr)
1900 struct oob_data *data;
1902 list_for_each_entry(data, &hdev->remote_oob_data, list)
1903 if (bacmp(bdaddr, &data->bdaddr) == 0)
1904 return data;
1906 return NULL;
1909 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1911 struct oob_data *data;
1913 data = hci_find_remote_oob_data(hdev, bdaddr);
1914 if (!data)
1915 return -ENOENT;
1917 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1919 list_del(&data->list);
1920 kfree(data);
1922 return 0;
1925 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1927 struct oob_data *data, *n;
1929 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1930 list_del(&data->list);
1931 kfree(data);
1934 return 0;
1937 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1938 u8 *randomizer)
1940 struct oob_data *data;
1942 data = hci_find_remote_oob_data(hdev, bdaddr);
1944 if (!data) {
1945 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1946 if (!data)
1947 return -ENOMEM;
1949 bacpy(&data->bdaddr, bdaddr);
1950 list_add(&data->list, &hdev->remote_oob_data);
1953 memcpy(data->hash, hash, sizeof(data->hash));
1954 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1956 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1958 return 0;
1961 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1963 struct bdaddr_list *b;
1965 list_for_each_entry(b, &hdev->blacklist, list)
1966 if (bacmp(bdaddr, &b->bdaddr) == 0)
1967 return b;
1969 return NULL;
1972 int hci_blacklist_clear(struct hci_dev *hdev)
1974 struct list_head *p, *n;
1976 list_for_each_safe(p, n, &hdev->blacklist) {
1977 struct bdaddr_list *b;
1979 b = list_entry(p, struct bdaddr_list, list);
1981 list_del(p);
1982 kfree(b);
1985 return 0;
1988 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1990 struct bdaddr_list *entry;
1992 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1993 return -EBADF;
1995 if (hci_blacklist_lookup(hdev, bdaddr))
1996 return -EEXIST;
1998 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1999 if (!entry)
2000 return -ENOMEM;
2002 bacpy(&entry->bdaddr, bdaddr);
2004 list_add(&entry->list, &hdev->blacklist);
2006 return mgmt_device_blocked(hdev, bdaddr, type);
2009 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2011 struct bdaddr_list *entry;
2013 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2014 return hci_blacklist_clear(hdev);
2016 entry = hci_blacklist_lookup(hdev, bdaddr);
2017 if (!entry)
2018 return -ENOENT;
2020 list_del(&entry->list);
2021 kfree(entry);
2023 return mgmt_device_unblocked(hdev, bdaddr, type);
2026 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2028 if (status) {
2029 BT_ERR("Failed to start inquiry: status %d", status);
2031 hci_dev_lock(hdev);
2032 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2033 hci_dev_unlock(hdev);
2034 return;
2038 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2040 /* General inquiry access code (GIAC) */
2041 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2042 struct hci_request req;
2043 struct hci_cp_inquiry cp;
2044 int err;
2046 if (status) {
2047 BT_ERR("Failed to disable LE scanning: status %d", status);
2048 return;
2051 switch (hdev->discovery.type) {
2052 case DISCOV_TYPE_LE:
2053 hci_dev_lock(hdev);
2054 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2055 hci_dev_unlock(hdev);
2056 break;
2058 case DISCOV_TYPE_INTERLEAVED:
2059 hci_req_init(&req, hdev);
2061 memset(&cp, 0, sizeof(cp));
2062 memcpy(&cp.lap, lap, sizeof(cp.lap));
2063 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2064 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2066 hci_dev_lock(hdev);
2068 hci_inquiry_cache_flush(hdev);
2070 err = hci_req_run(&req, inquiry_complete);
2071 if (err) {
2072 BT_ERR("Inquiry request failed: err %d", err);
2073 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2076 hci_dev_unlock(hdev);
2077 break;
2081 static void le_scan_disable_work(struct work_struct *work)
2083 struct hci_dev *hdev = container_of(work, struct hci_dev,
2084 le_scan_disable.work);
2085 struct hci_cp_le_set_scan_enable cp;
2086 struct hci_request req;
2087 int err;
2089 BT_DBG("%s", hdev->name);
2091 hci_req_init(&req, hdev);
2093 memset(&cp, 0, sizeof(cp));
2094 cp.enable = LE_SCAN_DISABLE;
2095 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2097 err = hci_req_run(&req, le_scan_disable_work_complete);
2098 if (err)
2099 BT_ERR("Disable LE scanning request failed: err %d", err);
2102 /* Alloc HCI device */
2103 struct hci_dev *hci_alloc_dev(void)
2105 struct hci_dev *hdev;
2107 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2108 if (!hdev)
2109 return NULL;
2111 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2112 hdev->esco_type = (ESCO_HV1);
2113 hdev->link_mode = (HCI_LM_ACCEPT);
2114 hdev->io_capability = 0x03; /* No Input No Output */
2115 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2116 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2118 hdev->sniff_max_interval = 800;
2119 hdev->sniff_min_interval = 80;
2121 mutex_init(&hdev->lock);
2122 mutex_init(&hdev->req_lock);
2124 INIT_LIST_HEAD(&hdev->mgmt_pending);
2125 INIT_LIST_HEAD(&hdev->blacklist);
2126 INIT_LIST_HEAD(&hdev->uuids);
2127 INIT_LIST_HEAD(&hdev->link_keys);
2128 INIT_LIST_HEAD(&hdev->long_term_keys);
2129 INIT_LIST_HEAD(&hdev->remote_oob_data);
2130 INIT_LIST_HEAD(&hdev->conn_hash.list);
2132 INIT_WORK(&hdev->rx_work, hci_rx_work);
2133 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2134 INIT_WORK(&hdev->tx_work, hci_tx_work);
2135 INIT_WORK(&hdev->power_on, hci_power_on);
2137 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2138 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2139 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2141 skb_queue_head_init(&hdev->rx_q);
2142 skb_queue_head_init(&hdev->cmd_q);
2143 skb_queue_head_init(&hdev->raw_q);
2145 init_waitqueue_head(&hdev->req_wait_q);
2147 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2149 hci_init_sysfs(hdev);
2150 discovery_init(hdev);
2152 return hdev;
2154 EXPORT_SYMBOL(hci_alloc_dev);
2156 /* Free HCI device */
2157 void hci_free_dev(struct hci_dev *hdev)
2159 /* will free via device release */
2160 put_device(&hdev->dev);
2162 EXPORT_SYMBOL(hci_free_dev);
2164 /* Register HCI device */
2165 int hci_register_dev(struct hci_dev *hdev)
2167 int id, error;
2169 if (!hdev->open || !hdev->close)
2170 return -EINVAL;
2172 /* Do not allow HCI_AMP devices to register at index 0,
2173 * so the index can be used as the AMP controller ID.
2175 switch (hdev->dev_type) {
2176 case HCI_BREDR:
2177 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2178 break;
2179 case HCI_AMP:
2180 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2181 break;
2182 default:
2183 return -EINVAL;
2186 if (id < 0)
2187 return id;
2189 sprintf(hdev->name, "hci%d", id);
2190 hdev->id = id;
2192 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2194 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2195 WQ_MEM_RECLAIM, 1, hdev->name);
2196 if (!hdev->workqueue) {
2197 error = -ENOMEM;
2198 goto err;
2201 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2202 WQ_MEM_RECLAIM, 1, hdev->name);
2203 if (!hdev->req_workqueue) {
2204 destroy_workqueue(hdev->workqueue);
2205 error = -ENOMEM;
2206 goto err;
2209 error = hci_add_sysfs(hdev);
2210 if (error < 0)
2211 goto err_wqueue;
2213 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2214 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2215 hdev);
2216 if (hdev->rfkill) {
2217 if (rfkill_register(hdev->rfkill) < 0) {
2218 rfkill_destroy(hdev->rfkill);
2219 hdev->rfkill = NULL;
2223 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2224 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2226 set_bit(HCI_SETUP, &hdev->dev_flags);
2228 if (hdev->dev_type != HCI_AMP)
2229 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2231 write_lock(&hci_dev_list_lock);
2232 list_add(&hdev->list, &hci_dev_list);
2233 write_unlock(&hci_dev_list_lock);
2235 hci_notify(hdev, HCI_DEV_REG);
2236 hci_dev_hold(hdev);
2238 queue_work(hdev->req_workqueue, &hdev->power_on);
2240 return id;
2242 err_wqueue:
2243 destroy_workqueue(hdev->workqueue);
2244 destroy_workqueue(hdev->req_workqueue);
2245 err:
2246 ida_simple_remove(&hci_index_ida, hdev->id);
2248 return error;
2250 EXPORT_SYMBOL(hci_register_dev);
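/* Sketch of how a transport driver would use hci_alloc_dev() and
 * hci_register_dev() at probe time (callback names are hypothetical):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;         (or HCI_UART, HCI_SDIO, ...)
 *	hdev->open  = example_open;    (int (*)(struct hci_dev *))
 *	hdev->close = example_close;
 *	hdev->send  = example_send;    (int (*)(struct sk_buff *) in this version)
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */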
2252 /* Unregister HCI device */
2253 void hci_unregister_dev(struct hci_dev *hdev)
2255 int i, id;
2257 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2259 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2261 id = hdev->id;
2263 write_lock(&hci_dev_list_lock);
2264 list_del(&hdev->list);
2265 write_unlock(&hci_dev_list_lock);
2267 hci_dev_do_close(hdev);
2269 for (i = 0; i < NUM_REASSEMBLY; i++)
2270 kfree_skb(hdev->reassembly[i]);
2272 cancel_work_sync(&hdev->power_on);
2274 if (!test_bit(HCI_INIT, &hdev->flags) &&
2275 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2276 hci_dev_lock(hdev);
2277 mgmt_index_removed(hdev);
2278 hci_dev_unlock(hdev);
2281 /* mgmt_index_removed should take care of emptying the
2282 * pending list */
2283 BUG_ON(!list_empty(&hdev->mgmt_pending));
2285 hci_notify(hdev, HCI_DEV_UNREG);
2287 if (hdev->rfkill) {
2288 rfkill_unregister(hdev->rfkill);
2289 rfkill_destroy(hdev->rfkill);
2292 hci_del_sysfs(hdev);
2294 destroy_workqueue(hdev->workqueue);
2295 destroy_workqueue(hdev->req_workqueue);
2297 hci_dev_lock(hdev);
2298 hci_blacklist_clear(hdev);
2299 hci_uuids_clear(hdev);
2300 hci_link_keys_clear(hdev);
2301 hci_smp_ltks_clear(hdev);
2302 hci_remote_oob_data_clear(hdev);
2303 hci_dev_unlock(hdev);
2305 hci_dev_put(hdev);
2307 ida_simple_remove(&hci_index_ida, id);
2309 EXPORT_SYMBOL(hci_unregister_dev);
2311 /* Suspend HCI device */
2312 int hci_suspend_dev(struct hci_dev *hdev)
2314 hci_notify(hdev, HCI_DEV_SUSPEND);
2315 return 0;
2317 EXPORT_SYMBOL(hci_suspend_dev);
2319 /* Resume HCI device */
2320 int hci_resume_dev(struct hci_dev *hdev)
2322 hci_notify(hdev, HCI_DEV_RESUME);
2323 return 0;
2325 EXPORT_SYMBOL(hci_resume_dev);
2327 /* Receive frame from HCI drivers */
2328 int hci_recv_frame(struct sk_buff *skb)
2330 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2331 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2332 && !test_bit(HCI_INIT, &hdev->flags))) {
2333 kfree_skb(skb);
2334 return -ENXIO;
2337 /* Incoming skb */
2338 bt_cb(skb)->incoming = 1;
2340 /* Time stamp */
2341 __net_timestamp(skb);
2343 skb_queue_tail(&hdev->rx_q, skb);
2344 queue_work(hdev->workqueue, &hdev->rx_work);
2346 return 0;
2348 EXPORT_SYMBOL(hci_recv_frame);
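/* Illustrative helper a driver could use to hand one complete event packet
 * to the core (hypothetical, not part of this file). Note that in this
 * version hci_recv_frame() takes the device from skb->dev.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}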
2350 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2351 int count, __u8 index)
2353 int len = 0;
2354 int hlen = 0;
2355 int remain = count;
2356 struct sk_buff *skb;
2357 struct bt_skb_cb *scb;
2359 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2360 index >= NUM_REASSEMBLY)
2361 return -EILSEQ;
2363 skb = hdev->reassembly[index];
2365 if (!skb) {
2366 switch (type) {
2367 case HCI_ACLDATA_PKT:
2368 len = HCI_MAX_FRAME_SIZE;
2369 hlen = HCI_ACL_HDR_SIZE;
2370 break;
2371 case HCI_EVENT_PKT:
2372 len = HCI_MAX_EVENT_SIZE;
2373 hlen = HCI_EVENT_HDR_SIZE;
2374 break;
2375 case HCI_SCODATA_PKT:
2376 len = HCI_MAX_SCO_SIZE;
2377 hlen = HCI_SCO_HDR_SIZE;
2378 break;
2381 skb = bt_skb_alloc(len, GFP_ATOMIC);
2382 if (!skb)
2383 return -ENOMEM;
2385 scb = (void *) skb->cb;
2386 scb->expect = hlen;
2387 scb->pkt_type = type;
2389 skb->dev = (void *) hdev;
2390 hdev->reassembly[index] = skb;
2393 while (count) {
2394 scb = (void *) skb->cb;
2395 len = min_t(uint, scb->expect, count);
2397 memcpy(skb_put(skb, len), data, len);
2399 count -= len;
2400 data += len;
2401 scb->expect -= len;
2402 remain = count;
2404 switch (type) {
2405 case HCI_EVENT_PKT:
2406 if (skb->len == HCI_EVENT_HDR_SIZE) {
2407 struct hci_event_hdr *h = hci_event_hdr(skb);
2408 scb->expect = h->plen;
2410 if (skb_tailroom(skb) < scb->expect) {
2411 kfree_skb(skb);
2412 hdev->reassembly[index] = NULL;
2413 return -ENOMEM;
2416 break;
2418 case HCI_ACLDATA_PKT:
2419 if (skb->len == HCI_ACL_HDR_SIZE) {
2420 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2421 scb->expect = __le16_to_cpu(h->dlen);
2423 if (skb_tailroom(skb) < scb->expect) {
2424 kfree_skb(skb);
2425 hdev->reassembly[index] = NULL;
2426 return -ENOMEM;
2429 break;
2431 case HCI_SCODATA_PKT:
2432 if (skb->len == HCI_SCO_HDR_SIZE) {
2433 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2434 scb->expect = h->dlen;
2436 if (skb_tailroom(skb) < scb->expect) {
2437 kfree_skb(skb);
2438 hdev->reassembly[index] = NULL;
2439 return -ENOMEM;
2442 break;
2445 if (scb->expect == 0) {
2446 /* Complete frame */
2448 bt_cb(skb)->pkt_type = type;
2449 hci_recv_frame(skb);
2451 hdev->reassembly[index] = NULL;
2452 return remain;
2456 return remain;
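/* Entry point for drivers that deliver complete, typed HCI packets which
 * may still arrive split across several calls: each packet type gets its
 * own reassembly slot (type - 1) and the data is fed through
 * hci_reassembly() until all of it has been consumed. A driver would
 * typically call, for example, hci_recv_fragment(hdev, HCI_EVENT_PKT,
 * buf, len) from its receive path (buf and len are illustrative names).
 */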
2459 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2461 int rem = 0;
2463 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2464 return -EILSEQ;
2466 while (count) {
2467 rem = hci_reassembly(hdev, type, data, count, type - 1);
2468 if (rem < 0)
2469 return rem;
2471 data += (count - rem);
2472 count = rem;
2475 return rem;
2477 EXPORT_SYMBOL(hci_recv_fragment);
2479 #define STREAM_REASSEMBLY 0
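/* Entry point for drivers that deliver a raw byte stream (for example
 * UART based transports) where each packet is preceded by a one byte
 * packet type indicator: when no reassembly is in progress the first byte
 * is taken as the type and the rest is fed to hci_reassembly() using the
 * single STREAM_REASSEMBLY slot.
 */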
2481 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2483 int type;
2484 int rem = 0;
2486 while (count) {
2487 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2489 if (!skb) {
2490 struct { char type; } *pkt;
2492 /* Start of the frame */
2493 pkt = data;
2494 type = pkt->type;
2496 data++;
2497 count--;
2498 } else
2499 type = bt_cb(skb)->pkt_type;
2501 rem = hci_reassembly(hdev, type, data, count,
2502 STREAM_REASSEMBLY);
2503 if (rem < 0)
2504 return rem;
2506 data += (count - rem);
2507 count = rem;
2510 return rem;
2512 EXPORT_SYMBOL(hci_recv_stream_fragment);
2514 /* ---- Interface to upper protocols ---- */
2516 int hci_register_cb(struct hci_cb *cb)
2518 BT_DBG("%p name %s", cb, cb->name);
2520 write_lock(&hci_cb_list_lock);
2521 list_add(&cb->list, &hci_cb_list);
2522 write_unlock(&hci_cb_list_lock);
2524 return 0;
2526 EXPORT_SYMBOL(hci_register_cb);
2528 int hci_unregister_cb(struct hci_cb *cb)
2530 BT_DBG("%p name %s", cb, cb->name);
2532 write_lock(&hci_cb_list_lock);
2533 list_del(&cb->list);
2534 write_unlock(&hci_cb_list_lock);
2536 return 0;
2538 EXPORT_SYMBOL(hci_unregister_cb);
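/* Hand a single packet to the driver: timestamp it, mirror a copy to the
 * monitor socket and (in promiscuous mode) to raw HCI sockets, then pass
 * it to the driver's send callback.
 */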
2540 static int hci_send_frame(struct sk_buff *skb)
2542 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2544 if (!hdev) {
2545 kfree_skb(skb);
2546 return -ENODEV;
2549 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2551 /* Time stamp */
2552 __net_timestamp(skb);
2554 /* Send copy to monitor */
2555 hci_send_to_monitor(hdev, skb);
2557 if (atomic_read(&hdev->promisc)) {
2558 /* Send copy to the sockets */
2559 hci_send_to_sock(hdev, skb);
2562 /* Get rid of skb owner, prior to sending to the driver. */
2563 skb_orphan(skb);
2565 return hdev->send(skb);
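/* HCI request helpers. A request collects one or more commands on its own
 * queue and only splices them onto the device command queue once
 * hci_req_run() is called, so the whole batch is tracked as one unit and a
 * single completion callback fires for it. A minimal illustrative sketch
 * (my_complete is just a placeholder name for a hci_req_complete_t):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * hci_req_run() returns -ENODATA for an empty request, or the error
 * recorded while building the request (in which case nothing is sent).
 */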
2568 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2570 skb_queue_head_init(&req->cmd_q);
2571 req->hdev = hdev;
2572 req->err = 0;
2575 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2577 struct hci_dev *hdev = req->hdev;
2578 struct sk_buff *skb;
2579 unsigned long flags;
2581 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2583 /* If an error occurred during request building, remove all HCI
2584 * commands queued on the HCI request queue.
2586 if (req->err) {
2587 skb_queue_purge(&req->cmd_q);
2588 return req->err;
2591 /* Do not allow empty requests */
2592 if (skb_queue_empty(&req->cmd_q))
2593 return -ENODATA;
2595 skb = skb_peek_tail(&req->cmd_q);
2596 bt_cb(skb)->req.complete = complete;
2598 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2599 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2600 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2602 queue_work(hdev->workqueue, &hdev->cmd_work);
2604 return 0;
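/* Allocate an skb and build an HCI command in it: the command header
 * (opcode in little endian plus parameter length) followed by the
 * parameters. Shared by hci_send_cmd() and hci_req_add_ev().
 */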
2607 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2608 u32 plen, const void *param)
2610 int len = HCI_COMMAND_HDR_SIZE + plen;
2611 struct hci_command_hdr *hdr;
2612 struct sk_buff *skb;
2614 skb = bt_skb_alloc(len, GFP_ATOMIC);
2615 if (!skb)
2616 return NULL;
2618 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2619 hdr->opcode = cpu_to_le16(opcode);
2620 hdr->plen = plen;
2622 if (plen)
2623 memcpy(skb_put(skb, plen), param, plen);
2625 BT_DBG("skb len %d", skb->len);
2627 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2628 skb->dev = (void *) hdev;
2630 return skb;
2633 /* Send HCI command */
2634 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2635 const void *param)
2637 struct sk_buff *skb;
2639 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2641 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2642 if (!skb) {
2643 BT_ERR("%s no memory for command", hdev->name);
2644 return -ENOMEM;
2647 /* Stand-alone HCI commands must be flagged as
2648 * single-command requests.
2650 bt_cb(skb)->req.start = true;
2652 skb_queue_tail(&hdev->cmd_q, skb);
2653 queue_work(hdev->workqueue, &hdev->cmd_work);
2655 return 0;
2658 /* Queue a command to an asynchronous HCI request */
2659 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2660 const void *param, u8 event)
2662 struct hci_dev *hdev = req->hdev;
2663 struct sk_buff *skb;
2665 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2667 /* If an error occurred during request building, there is no point in
2668 * queueing the HCI command. We can simply return.
2670 if (req->err)
2671 return;
2673 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2674 if (!skb) {
2675 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2676 hdev->name, opcode);
2677 req->err = -ENOMEM;
2678 return;
2681 if (skb_queue_empty(&req->cmd_q))
2682 bt_cb(skb)->req.start = true;
2684 bt_cb(skb)->req.event = event;
2686 skb_queue_tail(&req->cmd_q, skb);
2689 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2690 const void *param)
2692 hci_req_add_ev(req, opcode, plen, param, 0);
2695 /* Get data from the previously sent command */
2696 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2698 struct hci_command_hdr *hdr;
2700 if (!hdev->sent_cmd)
2701 return NULL;
2703 hdr = (void *) hdev->sent_cmd->data;
2705 if (hdr->opcode != cpu_to_le16(opcode))
2706 return NULL;
2708 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2710 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2713 /* Send ACL data */
2714 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2716 struct hci_acl_hdr *hdr;
2717 int len = skb->len;
2719 skb_push(skb, HCI_ACL_HDR_SIZE);
2720 skb_reset_transport_header(skb);
2721 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2722 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2723 hdr->dlen = cpu_to_le16(len);
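/* Queue an outgoing ACL frame on the given queue. The ACL header carries
 * the connection handle on a BR/EDR controller and the channel handle on
 * an AMP controller. If the skb carries a fragment list, the head is
 * queued with the caller's flags and every fragment is queued behind it
 * with ACL_CONT set, all under the queue lock so the frame stays
 * contiguous.
 */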
2726 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2727 struct sk_buff *skb, __u16 flags)
2729 struct hci_conn *conn = chan->conn;
2730 struct hci_dev *hdev = conn->hdev;
2731 struct sk_buff *list;
2733 skb->len = skb_headlen(skb);
2734 skb->data_len = 0;
2736 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2738 switch (hdev->dev_type) {
2739 case HCI_BREDR:
2740 hci_add_acl_hdr(skb, conn->handle, flags);
2741 break;
2742 case HCI_AMP:
2743 hci_add_acl_hdr(skb, chan->handle, flags);
2744 break;
2745 default:
2746 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2747 return;
2750 list = skb_shinfo(skb)->frag_list;
2751 if (!list) {
2752 /* Non fragmented */
2753 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2755 skb_queue_tail(queue, skb);
2756 } else {
2757 /* Fragmented */
2758 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2760 skb_shinfo(skb)->frag_list = NULL;
2762 /* Queue all fragments atomically */
2763 spin_lock(&queue->lock);
2765 __skb_queue_tail(queue, skb);
2767 flags &= ~ACL_START;
2768 flags |= ACL_CONT;
2769 do {
2770 skb = list; list = list->next;
2772 skb->dev = (void *) hdev;
2773 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2774 hci_add_acl_hdr(skb, conn->handle, flags);
2776 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2778 __skb_queue_tail(queue, skb);
2779 } while (list);
2781 spin_unlock(&queue->lock);
2785 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2787 struct hci_dev *hdev = chan->conn->hdev;
2789 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2791 skb->dev = (void *) hdev;
2793 hci_queue_acl(chan, &chan->data_q, skb, flags);
2795 queue_work(hdev->workqueue, &hdev->tx_work);
2798 /* Send SCO data */
2799 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2801 struct hci_dev *hdev = conn->hdev;
2802 struct hci_sco_hdr hdr;
2804 BT_DBG("%s len %d", hdev->name, skb->len);
2806 hdr.handle = cpu_to_le16(conn->handle);
2807 hdr.dlen = skb->len;
2809 skb_push(skb, HCI_SCO_HDR_SIZE);
2810 skb_reset_transport_header(skb);
2811 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2813 skb->dev = (void *) hdev;
2814 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2816 skb_queue_tail(&conn->data_q, skb);
2817 queue_work(hdev->workqueue, &hdev->tx_work);
2820 /* ---- HCI TX task (outgoing data) ---- */
2822 /* HCI Connection scheduler */
2823 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2824 int *quote)
2826 struct hci_conn_hash *h = &hdev->conn_hash;
2827 struct hci_conn *conn = NULL, *c;
2828 unsigned int num = 0, min = ~0;
2830 /* We don't have to lock device here. Connections are always
2831 * added and removed with TX task disabled. */
2833 rcu_read_lock();
2835 list_for_each_entry_rcu(c, &h->list, list) {
2836 if (c->type != type || skb_queue_empty(&c->data_q))
2837 continue;
2839 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2840 continue;
2842 num++;
2844 if (c->sent < min) {
2845 min = c->sent;
2846 conn = c;
2849 if (hci_conn_num(hdev, type) == num)
2850 break;
2853 rcu_read_unlock();
2855 if (conn) {
2856 int cnt, q;
2858 switch (conn->type) {
2859 case ACL_LINK:
2860 cnt = hdev->acl_cnt;
2861 break;
2862 case SCO_LINK:
2863 case ESCO_LINK:
2864 cnt = hdev->sco_cnt;
2865 break;
2866 case LE_LINK:
2867 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2868 break;
2869 default:
2870 cnt = 0;
2871 BT_ERR("Unknown link type");
2874 q = cnt / num;
2875 *quote = q ? q : 1;
2876 } else
2877 *quote = 0;
2879 BT_DBG("conn %p quote %d", conn, *quote);
2880 return conn;
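/* Link TX timeout: the controller stopped acknowledging packets, so kill
 * every connection of this type that still has unacked data in flight.
 */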
2883 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2885 struct hci_conn_hash *h = &hdev->conn_hash;
2886 struct hci_conn *c;
2888 BT_ERR("%s link tx timeout", hdev->name);
2890 rcu_read_lock();
2892 /* Kill stalled connections */
2893 list_for_each_entry_rcu(c, &h->list, list) {
2894 if (c->type == type && c->sent) {
2895 BT_ERR("%s killing stalled connection %pMR",
2896 hdev->name, &c->dst);
2897 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2901 rcu_read_unlock();
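/* Channel scheduler: pick the channel whose head skb has the highest
 * priority, preferring the connection with the fewest packets already in
 * flight, and compute a quote as an even share of the free controller
 * buffers for that link type (always at least one).
 */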
2904 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2905 int *quote)
2907 struct hci_conn_hash *h = &hdev->conn_hash;
2908 struct hci_chan *chan = NULL;
2909 unsigned int num = 0, min = ~0, cur_prio = 0;
2910 struct hci_conn *conn;
2911 int cnt, q, conn_num = 0;
2913 BT_DBG("%s", hdev->name);
2915 rcu_read_lock();
2917 list_for_each_entry_rcu(conn, &h->list, list) {
2918 struct hci_chan *tmp;
2920 if (conn->type != type)
2921 continue;
2923 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2924 continue;
2926 conn_num++;
2928 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2929 struct sk_buff *skb;
2931 if (skb_queue_empty(&tmp->data_q))
2932 continue;
2934 skb = skb_peek(&tmp->data_q);
2935 if (skb->priority < cur_prio)
2936 continue;
2938 if (skb->priority > cur_prio) {
2939 num = 0;
2940 min = ~0;
2941 cur_prio = skb->priority;
2944 num++;
2946 if (conn->sent < min) {
2947 min = conn->sent;
2948 chan = tmp;
2952 if (hci_conn_num(hdev, type) == conn_num)
2953 break;
2956 rcu_read_unlock();
2958 if (!chan)
2959 return NULL;
2961 switch (chan->conn->type) {
2962 case ACL_LINK:
2963 cnt = hdev->acl_cnt;
2964 break;
2965 case AMP_LINK:
2966 cnt = hdev->block_cnt;
2967 break;
2968 case SCO_LINK:
2969 case ESCO_LINK:
2970 cnt = hdev->sco_cnt;
2971 break;
2972 case LE_LINK:
2973 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2974 break;
2975 default:
2976 cnt = 0;
2977 BT_ERR("Unknown link type");
2980 q = cnt / num;
2981 *quote = q ? q : 1;
2982 BT_DBG("chan %p quote %d", chan, *quote);
2983 return chan;
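/* After a scheduling round, reset the sent counter of channels that got to
 * transmit and bump the head skb of still waiting channels to
 * HCI_PRIO_MAX - 1, so that starved channels win the next round.
 */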
2986 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2988 struct hci_conn_hash *h = &hdev->conn_hash;
2989 struct hci_conn *conn;
2990 int num = 0;
2992 BT_DBG("%s", hdev->name);
2994 rcu_read_lock();
2996 list_for_each_entry_rcu(conn, &h->list, list) {
2997 struct hci_chan *chan;
2999 if (conn->type != type)
3000 continue;
3002 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3003 continue;
3005 num++;
3007 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3008 struct sk_buff *skb;
3010 if (chan->sent) {
3011 chan->sent = 0;
3012 continue;
3015 if (skb_queue_empty(&chan->data_q))
3016 continue;
3018 skb = skb_peek(&chan->data_q);
3019 if (skb->priority >= HCI_PRIO_MAX - 1)
3020 continue;
3022 skb->priority = HCI_PRIO_MAX - 1;
3024 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3025 skb->priority);
3028 if (hci_conn_num(hdev, type) == num)
3029 break;
3032 rcu_read_unlock();
3036 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3038 /* Calculate count of blocks used by this packet */
3039 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3042 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3044 if (!test_bit(HCI_RAW, &hdev->flags)) {
3045 /* ACL tx timeout must be longer than maximum
3046 * link supervision timeout (40.9 seconds) */
3047 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3048 HCI_ACL_TX_TIMEOUT))
3049 hci_link_tx_to(hdev, ACL_LINK);
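/* Packet based ACL scheduling: while ACL buffer credits are left, let the
 * selected channel send up to its quote of skbs, stopping early when a
 * lower priority packet is reached, and recalculate channel priorities if
 * anything was sent.
 */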
3053 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3055 unsigned int cnt = hdev->acl_cnt;
3056 struct hci_chan *chan;
3057 struct sk_buff *skb;
3058 int quote;
3060 __check_timeout(hdev, cnt);
3062 while (hdev->acl_cnt &&
3063 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3064 u32 priority = (skb_peek(&chan->data_q))->priority;
3065 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3066 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3067 skb->len, skb->priority);
3069 /* Stop if priority has changed */
3070 if (skb->priority < priority)
3071 break;
3073 skb = skb_dequeue(&chan->data_q);
3075 hci_conn_enter_active_mode(chan->conn,
3076 bt_cb(skb)->force_active);
3078 hci_send_frame(skb);
3079 hdev->acl_last_tx = jiffies;
3081 hdev->acl_cnt--;
3082 chan->sent++;
3083 chan->conn->sent++;
3087 if (cnt != hdev->acl_cnt)
3088 hci_prio_recalculate(hdev, ACL_LINK);
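/* Block based ACL scheduling (data block flow control, also used for AMP
 * controllers): like the packet based variant, but every skb consumes
 * __get_blocks() credits from the shared block pool and transmission stops
 * once a frame would need more blocks than remain.
 */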
3091 static void hci_sched_acl_blk(struct hci_dev *hdev)
3093 unsigned int cnt = hdev->block_cnt;
3094 struct hci_chan *chan;
3095 struct sk_buff *skb;
3096 int quote;
3097 u8 type;
3099 __check_timeout(hdev, cnt);
3101 BT_DBG("%s", hdev->name);
3103 if (hdev->dev_type == HCI_AMP)
3104 type = AMP_LINK;
3105 else
3106 type = ACL_LINK;
3108 while (hdev->block_cnt > 0 &&
3109 (chan = hci_chan_sent(hdev, type, &quote))) {
3110 u32 priority = (skb_peek(&chan->data_q))->priority;
3111 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3112 int blocks;
3114 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3115 skb->len, skb->priority);
3117 /* Stop if priority has changed */
3118 if (skb->priority < priority)
3119 break;
3121 skb = skb_dequeue(&chan->data_q);
3123 blocks = __get_blocks(hdev, skb);
3124 if (blocks > hdev->block_cnt)
3125 return;
3127 hci_conn_enter_active_mode(chan->conn,
3128 bt_cb(skb)->force_active);
3130 hci_send_frame(skb);
3131 hdev->acl_last_tx = jiffies;
3133 hdev->block_cnt -= blocks;
3134 quote -= blocks;
3136 chan->sent += blocks;
3137 chan->conn->sent += blocks;
3141 if (cnt != hdev->block_cnt)
3142 hci_prio_recalculate(hdev, type);
3145 static void hci_sched_acl(struct hci_dev *hdev)
3147 BT_DBG("%s", hdev->name);
3149 /* No ACL link over BR/EDR controller */
3150 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3151 return;
3153 /* No AMP link over AMP controller */
3154 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3155 return;
3157 switch (hdev->flow_ctl_mode) {
3158 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3159 hci_sched_acl_pkt(hdev);
3160 break;
3162 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3163 hci_sched_acl_blk(hdev);
3164 break;
3168 /* Schedule SCO */
3169 static void hci_sched_sco(struct hci_dev *hdev)
3171 struct hci_conn *conn;
3172 struct sk_buff *skb;
3173 int quote;
3175 BT_DBG("%s", hdev->name);
3177 if (!hci_conn_num(hdev, SCO_LINK))
3178 return;
3180 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3181 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3182 BT_DBG("skb %p len %d", skb, skb->len);
3183 hci_send_frame(skb);
3185 conn->sent++;
3186 if (conn->sent == ~0)
3187 conn->sent = 0;
3192 static void hci_sched_esco(struct hci_dev *hdev)
3194 struct hci_conn *conn;
3195 struct sk_buff *skb;
3196 int quote;
3198 BT_DBG("%s", hdev->name);
3200 if (!hci_conn_num(hdev, ESCO_LINK))
3201 return;
3203 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3204 &quote))) {
3205 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3206 BT_DBG("skb %p len %d", skb, skb->len);
3207 hci_send_frame(skb);
3209 conn->sent++;
3210 if (conn->sent == ~0)
3211 conn->sent = 0;
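/* LE scheduling: LE traffic uses the dedicated LE buffer pool when the
 * controller reports one (le_pkts), otherwise it shares the ACL pool, and
 * it has its own TX timeout check.
 */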
3216 static void hci_sched_le(struct hci_dev *hdev)
3218 struct hci_chan *chan;
3219 struct sk_buff *skb;
3220 int quote, cnt, tmp;
3222 BT_DBG("%s", hdev->name);
3224 if (!hci_conn_num(hdev, LE_LINK))
3225 return;
3227 if (!test_bit(HCI_RAW, &hdev->flags)) {
3228 /* LE tx timeout must be longer than maximum
3229 * link supervision timeout (40.9 seconds) */
3230 if (!hdev->le_cnt && hdev->le_pkts &&
3231 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3232 hci_link_tx_to(hdev, LE_LINK);
3235 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3236 tmp = cnt;
3237 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3238 u32 priority = (skb_peek(&chan->data_q))->priority;
3239 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3240 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3241 skb->len, skb->priority);
3243 /* Stop if priority has changed */
3244 if (skb->priority < priority)
3245 break;
3247 skb = skb_dequeue(&chan->data_q);
3249 hci_send_frame(skb);
3250 hdev->le_last_tx = jiffies;
3252 cnt--;
3253 chan->sent++;
3254 chan->conn->sent++;
3258 if (hdev->le_pkts)
3259 hdev->le_cnt = cnt;
3260 else
3261 hdev->acl_cnt = cnt;
3263 if (cnt != tmp)
3264 hci_prio_recalculate(hdev, LE_LINK);
3267 static void hci_tx_work(struct work_struct *work)
3269 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3270 struct sk_buff *skb;
3272 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3273 hdev->sco_cnt, hdev->le_cnt);
3275 /* Schedule queues and send stuff to HCI driver */
3277 hci_sched_acl(hdev);
3279 hci_sched_sco(hdev);
3281 hci_sched_esco(hdev);
3283 hci_sched_le(hdev);
3285 /* Send next queued raw (unknown type) packet */
3286 while ((skb = skb_dequeue(&hdev->raw_q)))
3287 hci_send_frame(skb);
3290 /* ----- HCI RX task (incoming data processing) ----- */
3292 /* ACL data packet */
3293 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3295 struct hci_acl_hdr *hdr = (void *) skb->data;
3296 struct hci_conn *conn;
3297 __u16 handle, flags;
3299 skb_pull(skb, HCI_ACL_HDR_SIZE);
3301 handle = __le16_to_cpu(hdr->handle);
3302 flags = hci_flags(handle);
3303 handle = hci_handle(handle);
3305 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3306 handle, flags);
3308 hdev->stat.acl_rx++;
3310 hci_dev_lock(hdev);
3311 conn = hci_conn_hash_lookup_handle(hdev, handle);
3312 hci_dev_unlock(hdev);
3314 if (conn) {
3315 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3317 /* Send to upper protocol */
3318 l2cap_recv_acldata(conn, skb, flags);
3319 return;
3320 } else {
3321 BT_ERR("%s ACL packet for unknown connection handle %d",
3322 hdev->name, handle);
3325 kfree_skb(skb);
3328 /* SCO data packet */
3329 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3331 struct hci_sco_hdr *hdr = (void *) skb->data;
3332 struct hci_conn *conn;
3333 __u16 handle;
3335 skb_pull(skb, HCI_SCO_HDR_SIZE);
3337 handle = __le16_to_cpu(hdr->handle);
3339 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3341 hdev->stat.sco_rx++;
3343 hci_dev_lock(hdev);
3344 conn = hci_conn_hash_lookup_handle(hdev, handle);
3345 hci_dev_unlock(hdev);
3347 if (conn) {
3348 /* Send to upper protocol */
3349 sco_recv_scodata(conn, skb);
3350 return;
3351 } else {
3352 BT_ERR("%s SCO packet for unknown connection handle %d",
3353 hdev->name, handle);
3356 kfree_skb(skb);
3359 static bool hci_req_is_complete(struct hci_dev *hdev)
3361 struct sk_buff *skb;
3363 skb = skb_peek(&hdev->cmd_q);
3364 if (!skb)
3365 return true;
3367 return bt_cb(skb)->req.start;
3370 static void hci_resend_last(struct hci_dev *hdev)
3372 struct hci_command_hdr *sent;
3373 struct sk_buff *skb;
3374 u16 opcode;
3376 if (!hdev->sent_cmd)
3377 return;
3379 sent = (void *) hdev->sent_cmd->data;
3380 opcode = __le16_to_cpu(sent->opcode);
3381 if (opcode == HCI_OP_RESET)
3382 return;
3384 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3385 if (!skb)
3386 return;
3388 skb_queue_head(&hdev->cmd_q, skb);
3389 queue_work(hdev->workqueue, &hdev->cmd_work);
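/* Called from event processing for every command complete/status event to
 * work out whether the request the command belonged to has finished. An
 * opcode that does not match hdev->sent_cmd normally means a spontaneous
 * event (see the CSR reset quirk below). Otherwise, once the request has
 * failed or its last command has completed, the completion callback is
 * looked up (on sent_cmd, or while flushing the request's remaining
 * commands from cmd_q) and invoked with the status.
 */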
3392 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3394 hci_req_complete_t req_complete = NULL;
3395 struct sk_buff *skb;
3396 unsigned long flags;
3398 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3400 /* If the completed command doesn't match the last one that was
3401 * sent, we need to do special handling of it.
3403 if (!hci_sent_cmd_data(hdev, opcode)) {
3404 /* Some CSR-based controllers generate a spontaneous
3405 * reset complete event during init and any pending
3406 * command will never be completed. In such a case we
3407 * need to resend whatever was the last sent
3408 * command.
3410 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3411 hci_resend_last(hdev);
3413 return;
3416 /* If the command succeeded and there are still more commands in
3417 * this request, the request is not yet complete.
3419 if (!status && !hci_req_is_complete(hdev))
3420 return;
3422 /* If this was the last command in a request, the complete
3423 * callback would be found in hdev->sent_cmd instead of the
3424 * command queue (hdev->cmd_q).
3426 if (hdev->sent_cmd) {
3427 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3429 if (req_complete) {
3430 /* We must set the complete callback to NULL to
3431 * avoid calling the callback more than once if
3432 * this function gets called again.
3434 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3436 goto call_complete;
3440 /* Remove all pending commands belonging to this request */
3441 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3442 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3443 if (bt_cb(skb)->req.start) {
3444 __skb_queue_head(&hdev->cmd_q, skb);
3445 break;
3448 req_complete = bt_cb(skb)->req.complete;
3449 kfree_skb(skb);
3451 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3453 call_complete:
3454 if (req_complete)
3455 req_complete(hdev, status);
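/* RX work: drain the receive queue, mirroring each packet to the monitor
 * (and to raw sockets in promiscuous mode), dropping everything in raw
 * mode and data packets while the device is still initializing, and
 * dispatching the rest to the event, ACL and SCO handlers.
 */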
3458 static void hci_rx_work(struct work_struct *work)
3460 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3461 struct sk_buff *skb;
3463 BT_DBG("%s", hdev->name);
3465 while ((skb = skb_dequeue(&hdev->rx_q))) {
3466 /* Send copy to monitor */
3467 hci_send_to_monitor(hdev, skb);
3469 if (atomic_read(&hdev->promisc)) {
3470 /* Send copy to the sockets */
3471 hci_send_to_sock(hdev, skb);
3474 if (test_bit(HCI_RAW, &hdev->flags)) {
3475 kfree_skb(skb);
3476 continue;
3479 if (test_bit(HCI_INIT, &hdev->flags)) {
3480 /* Don't process data packets in this state. */
3481 switch (bt_cb(skb)->pkt_type) {
3482 case HCI_ACLDATA_PKT:
3483 case HCI_SCODATA_PKT:
3484 kfree_skb(skb);
3485 continue;
3489 /* Process frame */
3490 switch (bt_cb(skb)->pkt_type) {
3491 case HCI_EVENT_PKT:
3492 BT_DBG("%s Event packet", hdev->name);
3493 hci_event_packet(hdev, skb);
3494 break;
3496 case HCI_ACLDATA_PKT:
3497 BT_DBG("%s ACL data packet", hdev->name);
3498 hci_acldata_packet(hdev, skb);
3499 break;
3501 case HCI_SCODATA_PKT:
3502 BT_DBG("%s SCO data packet", hdev->name);
3503 hci_scodata_packet(hdev, skb);
3504 break;
3506 default:
3507 kfree_skb(skb);
3508 break;
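/* Command work: when the controller has a free command slot, send the next
 * queued command, keep a clone in hdev->sent_cmd so its completion can be
 * matched, and arm the command timeout (unless a reset is in flight). If
 * the clone cannot be allocated, the command is requeued and retried.
 */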
3513 static void hci_cmd_work(struct work_struct *work)
3515 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3516 struct sk_buff *skb;
3518 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3519 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3521 /* Send queued commands */
3522 if (atomic_read(&hdev->cmd_cnt)) {
3523 skb = skb_dequeue(&hdev->cmd_q);
3524 if (!skb)
3525 return;
3527 kfree_skb(hdev->sent_cmd);
3529 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3530 if (hdev->sent_cmd) {
3531 atomic_dec(&hdev->cmd_cnt);
3532 hci_send_frame(skb);
3533 if (test_bit(HCI_RESET, &hdev->flags))
3534 del_timer(&hdev->cmd_timer);
3535 else
3536 mod_timer(&hdev->cmd_timer,
3537 jiffies + HCI_CMD_TIMEOUT);
3538 } else {
3539 skb_queue_head(&hdev->cmd_q, skb);
3540 queue_work(hdev->workqueue, &hdev->cmd_work);
3545 u8 bdaddr_to_le(u8 bdaddr_type)
3547 switch (bdaddr_type) {
3548 case BDADDR_LE_PUBLIC:
3549 return ADDR_LE_DEV_PUBLIC;
3551 default:
3552 /* Fallback to LE Random address type */
3553 return ADDR_LE_DEV_RANDOM;