net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
89 hci_dev_lock(hdev);
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
94 hci_dev_unlock(hdev);
96 if (!skb)
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
132 failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
175 default:
176 err = -ETIMEDOUT;
177 break;
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
184 if (err < 0)
185 return ERR_PTR(err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
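/* Illustrative sketch (added, not part of the original file): how a
 * driver-side caller might use __hci_cmd_sync() to send a command and
 * wait for its Command Complete parameters. The opcode 0xfc0e and the
 * one-byte parameter are hypothetical vendor-specific values.
 */
#if 0
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
	u8 param = 0x01;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(param), &param,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now points at the Command Complete return parameters */
	kfree_skb(skb);
	return 0;
}
#endif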
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
201 unsigned long opt),
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
214 func(&req, opt);
216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
225 if (err == -ENODATA)
226 return 0;
228 return err;
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
239 return -EINTR;
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
243 err = -bt_to_errno(hdev->req_result);
244 break;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
250 default:
251 err = -ETIMEDOUT;
252 break;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
259 return err;
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
264 unsigned long opt),
265 unsigned long opt, __u32 timeout)
267 int ret;
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
272 /* Serialize all requests */
273 hci_req_lock(hdev);
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
277 return ret;
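/* Illustrative sketch (added): a request builder of the shape expected by
 * hci_req_sync(). The builder only queues commands; hci_req_sync() runs
 * them and blocks until hci_req_sync_complete() wakes the waiter. The
 * function name is hypothetical; the opcode is one already used below.
 */
#if 0
static void example_read_name_req(struct hci_request *req, unsigned long opt)
{
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

/* Caller: err = hci_req_sync(hdev, example_read_name_req, 0, HCI_INIT_TIMEOUT); */
#endif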
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
284 /* Reset device */
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
328 case HCI_BREDR:
329 bredr_init(req);
330 break;
332 case HCI_AMP:
333 amp_init(req);
334 break;
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
342 static void bredr_setup(struct hci_request *req)
344 __le16 param;
345 __u8 flt_type;
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
423 return 0x00;
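/* Descriptive note (added): the value computed above feeds the Write
 * Inquiry Mode command: 0x00 selects standard inquiry results, 0x01
 * results with RSSI and 0x02 extended inquiry results. The explicit
 * manufacturer/revision checks handle controllers whose LMP feature bits
 * do not match their actual inquiry-mode support.
 */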
426 static void hci_setup_inquiry_mode(struct hci_request *req)
428 u8 mode;
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 * command otherwise.
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
446 * any event mask for pre-1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 return;
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
457 } else {
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
510 events[0] = 0x1f;
511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
516 static void hci_init2_req(struct hci_request *req, unsigned long opt)
518 struct hci_dev *hdev = req->hdev;
520 if (lmp_bredr_capable(hdev))
521 bredr_setup(req);
523 if (lmp_le_capable(hdev))
524 le_setup(req);
526 hci_setup_event_mask(req);
528 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
529 * local supported commands HCI command.
531 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
532 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534 if (lmp_ssp_capable(hdev)) {
535 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
536 u8 mode = 0x01;
537 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
538 sizeof(mode), &mode);
539 } else {
540 struct hci_cp_write_eir cp;
542 memset(hdev->eir, 0, sizeof(hdev->eir));
543 memset(&cp, 0, sizeof(cp));
545 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
549 if (lmp_inq_rssi_capable(hdev))
550 hci_setup_inquiry_mode(req);
552 if (lmp_inq_tx_pwr_capable(hdev))
553 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
555 if (lmp_ext_feat_capable(hdev)) {
556 struct hci_cp_read_local_ext_features cp;
558 cp.page = 0x01;
559 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
560 sizeof(cp), &cp);
563 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
564 u8 enable = 1;
565 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
566 &enable);
570 static void hci_setup_link_policy(struct hci_request *req)
572 struct hci_dev *hdev = req->hdev;
573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
585 cp.policy = cpu_to_le16(link_policy);
586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
589 static void hci_set_le_support(struct hci_request *req)
591 struct hci_dev *hdev = req->hdev;
592 struct hci_cp_write_le_host_supported cp;
594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
596 return;
598 memset(&cp, 0, sizeof(cp));
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
601 cp.le = 0x01;
602 cp.simul = lmp_le_br_capable(hdev);
605 if (cp.le != lmp_host_le_capable(hdev))
606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
607 &cp);
610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
612 struct hci_dev *hdev = req->hdev;
613 u8 p;
615 /* Some Broadcom-based Bluetooth controllers do not support the
616 * Delete Stored Link Key command. They clearly indicate its
617 * absence in the bit mask of supported commands.
619 * Check the supported commands and only send the command if it is
620 * marked as supported. If it is not supported, assume that the
621 * controller has no actual support for stored link keys, which makes
622 * this command redundant anyway.
624 if (hdev->commands[6] & 0x80) {
625 struct hci_cp_delete_stored_link_key cp;
627 bacpy(&cp.bdaddr, BDADDR_ANY);
628 cp.delete_all = 0x01;
629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
630 sizeof(cp), &cp);
633 if (hdev->commands[5] & 0x10)
634 hci_setup_link_policy(req);
636 if (lmp_le_capable(hdev)) {
637 hci_set_le_support(req);
638 hci_update_ad(req);
641 /* Read features beyond page 1 if available */
642 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
643 struct hci_cp_read_local_ext_features cp;
645 cp.page = p;
646 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
647 sizeof(cp), &cp);
651 static int __hci_init(struct hci_dev *hdev)
653 int err;
655 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
656 if (err < 0)
657 return err;
659 /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
660 * BR/EDR/LE type controllers. AMP controllers only need the
661 * first-stage init.
663 if (hdev->dev_type != HCI_BREDR)
664 return 0;
666 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
667 if (err < 0)
668 return err;
670 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
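/* Descriptive note (added): __hci_init() therefore runs up to three
 * staged synchronous requests: basic reset and identity reads (init1),
 * feature-dependent setup such as the event mask and SSP/EIR (init2),
 * and final configuration like link policy and LE support (init3).
 * AMP controllers stop after the first stage.
 */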
673 static void hci_scan_req(struct hci_request *req, unsigned long opt)
675 __u8 scan = opt;
677 BT_DBG("%s %x", req->hdev->name, scan);
679 /* Inquiry and Page scans */
680 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
683 static void hci_auth_req(struct hci_request *req, unsigned long opt)
685 __u8 auth = opt;
687 BT_DBG("%s %x", req->hdev->name, auth);
689 /* Authentication */
690 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
693 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
695 __u8 encrypt = opt;
697 BT_DBG("%s %x", req->hdev->name, encrypt);
699 /* Encryption */
700 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
703 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
705 __le16 policy = cpu_to_le16(opt);
707 BT_DBG("%s %x", req->hdev->name, policy);
709 /* Default link policy */
710 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
713 /* Get HCI device by index.
714 * Device is held on return. */
715 struct hci_dev *hci_dev_get(int index)
717 struct hci_dev *hdev = NULL, *d;
719 BT_DBG("%d", index);
721 if (index < 0)
722 return NULL;
724 read_lock(&hci_dev_list_lock);
725 list_for_each_entry(d, &hci_dev_list, list) {
726 if (d->id == index) {
727 hdev = hci_dev_hold(d);
728 break;
731 read_unlock(&hci_dev_list_lock);
732 return hdev;
735 /* ---- Inquiry support ---- */
737 bool hci_discovery_active(struct hci_dev *hdev)
739 struct discovery_state *discov = &hdev->discovery;
741 switch (discov->state) {
742 case DISCOVERY_FINDING:
743 case DISCOVERY_RESOLVING:
744 return true;
746 default:
747 return false;
751 void hci_discovery_set_state(struct hci_dev *hdev, int state)
753 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
755 if (hdev->discovery.state == state)
756 return;
758 switch (state) {
759 case DISCOVERY_STOPPED:
760 if (hdev->discovery.state != DISCOVERY_STARTING)
761 mgmt_discovering(hdev, 0);
762 break;
763 case DISCOVERY_STARTING:
764 break;
765 case DISCOVERY_FINDING:
766 mgmt_discovering(hdev, 1);
767 break;
768 case DISCOVERY_RESOLVING:
769 break;
770 case DISCOVERY_STOPPING:
771 break;
774 hdev->discovery.state = state;
777 void hci_inquiry_cache_flush(struct hci_dev *hdev)
779 struct discovery_state *cache = &hdev->discovery;
780 struct inquiry_entry *p, *n;
782 list_for_each_entry_safe(p, n, &cache->all, all) {
783 list_del(&p->all);
784 kfree(p);
787 INIT_LIST_HEAD(&cache->unknown);
788 INIT_LIST_HEAD(&cache->resolve);
791 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
792 bdaddr_t *bdaddr)
794 struct discovery_state *cache = &hdev->discovery;
795 struct inquiry_entry *e;
797 BT_DBG("cache %p, %pMR", cache, bdaddr);
799 list_for_each_entry(e, &cache->all, all) {
800 if (!bacmp(&e->data.bdaddr, bdaddr))
801 return e;
804 return NULL;
807 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
808 bdaddr_t *bdaddr)
810 struct discovery_state *cache = &hdev->discovery;
811 struct inquiry_entry *e;
813 BT_DBG("cache %p, %pMR", cache, bdaddr);
815 list_for_each_entry(e, &cache->unknown, list) {
816 if (!bacmp(&e->data.bdaddr, bdaddr))
817 return e;
820 return NULL;
823 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
824 bdaddr_t *bdaddr,
825 int state)
827 struct discovery_state *cache = &hdev->discovery;
828 struct inquiry_entry *e;
830 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
832 list_for_each_entry(e, &cache->resolve, list) {
833 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
834 return e;
835 if (!bacmp(&e->data.bdaddr, bdaddr))
836 return e;
839 return NULL;
842 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
843 struct inquiry_entry *ie)
845 struct discovery_state *cache = &hdev->discovery;
846 struct list_head *pos = &cache->resolve;
847 struct inquiry_entry *p;
849 list_del(&ie->list);
851 list_for_each_entry(p, &cache->resolve, list) {
852 if (p->name_state != NAME_PENDING &&
853 abs(p->data.rssi) >= abs(ie->data.rssi))
854 break;
855 pos = &p->list;
858 list_add(&ie->list, pos);
861 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
862 bool name_known, bool *ssp)
864 struct discovery_state *cache = &hdev->discovery;
865 struct inquiry_entry *ie;
867 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
869 hci_remove_remote_oob_data(hdev, &data->bdaddr);
871 if (ssp)
872 *ssp = data->ssp_mode;
874 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
875 if (ie) {
876 if (ie->data.ssp_mode && ssp)
877 *ssp = true;
879 if (ie->name_state == NAME_NEEDED &&
880 data->rssi != ie->data.rssi) {
881 ie->data.rssi = data->rssi;
882 hci_inquiry_cache_update_resolve(hdev, ie);
885 goto update;
888 /* Entry not in the cache. Add new one. */
889 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
890 if (!ie)
891 return false;
893 list_add(&ie->all, &cache->all);
895 if (name_known) {
896 ie->name_state = NAME_KNOWN;
897 } else {
898 ie->name_state = NAME_NOT_KNOWN;
899 list_add(&ie->list, &cache->unknown);
902 update:
903 if (name_known && ie->name_state != NAME_KNOWN &&
904 ie->name_state != NAME_PENDING) {
905 ie->name_state = NAME_KNOWN;
906 list_del(&ie->list);
909 memcpy(&ie->data, data, sizeof(*data));
910 ie->timestamp = jiffies;
911 cache->timestamp = jiffies;
913 if (ie->name_state == NAME_NOT_KNOWN)
914 return false;
916 return true;
919 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
921 struct discovery_state *cache = &hdev->discovery;
922 struct inquiry_info *info = (struct inquiry_info *) buf;
923 struct inquiry_entry *e;
924 int copied = 0;
926 list_for_each_entry(e, &cache->all, all) {
927 struct inquiry_data *data = &e->data;
929 if (copied >= num)
930 break;
932 bacpy(&info->bdaddr, &data->bdaddr);
933 info->pscan_rep_mode = data->pscan_rep_mode;
934 info->pscan_period_mode = data->pscan_period_mode;
935 info->pscan_mode = data->pscan_mode;
936 memcpy(info->dev_class, data->dev_class, 3);
937 info->clock_offset = data->clock_offset;
939 info++;
940 copied++;
943 BT_DBG("cache %p, copied %d", cache, copied);
944 return copied;
947 static void hci_inq_req(struct hci_request *req, unsigned long opt)
949 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
950 struct hci_dev *hdev = req->hdev;
951 struct hci_cp_inquiry cp;
953 BT_DBG("%s", hdev->name);
955 if (test_bit(HCI_INQUIRY, &hdev->flags))
956 return;
958 /* Start Inquiry */
959 memcpy(&cp.lap, &ir->lap, 3);
960 cp.length = ir->length;
961 cp.num_rsp = ir->num_rsp;
962 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
965 static int wait_inquiry(void *word)
967 schedule();
968 return signal_pending(current);
971 int hci_inquiry(void __user *arg)
973 __u8 __user *ptr = arg;
974 struct hci_inquiry_req ir;
975 struct hci_dev *hdev;
976 int err = 0, do_inquiry = 0, max_rsp;
977 long timeo;
978 __u8 *buf;
980 if (copy_from_user(&ir, ptr, sizeof(ir)))
981 return -EFAULT;
983 hdev = hci_dev_get(ir.dev_id);
984 if (!hdev)
985 return -ENODEV;
987 hci_dev_lock(hdev);
988 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
989 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
990 hci_inquiry_cache_flush(hdev);
991 do_inquiry = 1;
993 hci_dev_unlock(hdev);
995 timeo = ir.length * msecs_to_jiffies(2000);
997 if (do_inquiry) {
998 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
999 timeo);
1000 if (err < 0)
1001 goto done;
1003 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1004 * cleared). If it is interrupted by a signal, return -EINTR.
1006 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1007 TASK_INTERRUPTIBLE))
1008 return -EINTR;
1011 /* For an unlimited number of responses we will use a buffer with
1012 * 255 entries
1014 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1016 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
1017 * and then copy it to user space.
1019 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1020 if (!buf) {
1021 err = -ENOMEM;
1022 goto done;
1025 hci_dev_lock(hdev);
1026 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1027 hci_dev_unlock(hdev);
1029 BT_DBG("num_rsp %d", ir.num_rsp);
1031 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1032 ptr += sizeof(ir);
1033 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1034 ir.num_rsp))
1035 err = -EFAULT;
1036 } else
1037 err = -EFAULT;
1039 kfree(buf);
1041 done:
1042 hci_dev_put(hdev);
1043 return err;
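/* Descriptive note (added): create_ad() builds the LE advertising data as
 * a sequence of AD structures, each encoded as <length><type><payload>
 * where <length> counts the type byte plus the payload. It emits Flags,
 * TX Power and the (possibly shortened) local name, and returns the total
 * number of bytes written to ptr.
 */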
1046 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1048 u8 ad_len = 0, flags = 0;
1049 size_t name_len;
1051 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1052 flags |= LE_AD_GENERAL;
1054 if (!lmp_bredr_capable(hdev))
1055 flags |= LE_AD_NO_BREDR;
1057 if (lmp_le_br_capable(hdev))
1058 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1060 if (lmp_host_le_br_capable(hdev))
1061 flags |= LE_AD_SIM_LE_BREDR_HOST;
1063 if (flags) {
1064 BT_DBG("adv flags 0x%02x", flags);
1066 ptr[0] = 2;
1067 ptr[1] = EIR_FLAGS;
1068 ptr[2] = flags;
1070 ad_len += 3;
1071 ptr += 3;
1074 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1075 ptr[0] = 2;
1076 ptr[1] = EIR_TX_POWER;
1077 ptr[2] = (u8) hdev->adv_tx_power;
1079 ad_len += 3;
1080 ptr += 3;
1083 name_len = strlen(hdev->dev_name);
1084 if (name_len > 0) {
1085 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1087 if (name_len > max_len) {
1088 name_len = max_len;
1089 ptr[1] = EIR_NAME_SHORT;
1090 } else
1091 ptr[1] = EIR_NAME_COMPLETE;
1093 ptr[0] = name_len + 1;
1095 memcpy(ptr + 2, hdev->dev_name, name_len);
1097 ad_len += (name_len + 2);
1098 ptr += (name_len + 2);
1101 return ad_len;
1104 void hci_update_ad(struct hci_request *req)
1106 struct hci_dev *hdev = req->hdev;
1107 struct hci_cp_le_set_adv_data cp;
1108 u8 len;
1110 if (!lmp_le_capable(hdev))
1111 return;
1113 memset(&cp, 0, sizeof(cp));
1115 len = create_ad(hdev, cp.data);
1117 if (hdev->adv_data_len == len &&
1118 memcmp(cp.data, hdev->adv_data, len) == 0)
1119 return;
1121 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1122 hdev->adv_data_len = len;
1124 cp.length = len;
1126 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1129 /* ---- HCI ioctl helpers ---- */
1131 int hci_dev_open(__u16 dev)
1133 struct hci_dev *hdev;
1134 int ret = 0;
1136 hdev = hci_dev_get(dev);
1137 if (!hdev)
1138 return -ENODEV;
1140 BT_DBG("%s %p", hdev->name, hdev);
1142 hci_req_lock(hdev);
1144 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1145 ret = -ENODEV;
1146 goto done;
1149 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1150 ret = -ERFKILL;
1151 goto done;
1154 if (test_bit(HCI_UP, &hdev->flags)) {
1155 ret = -EALREADY;
1156 goto done;
1159 if (hdev->open(hdev)) {
1160 ret = -EIO;
1161 goto done;
1164 atomic_set(&hdev->cmd_cnt, 1);
1165 set_bit(HCI_INIT, &hdev->flags);
1167 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1168 ret = hdev->setup(hdev);
1170 if (!ret) {
1171 /* Treat all non BR/EDR controllers as raw devices if
1172 * enable_hs is not set.
1174 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1175 set_bit(HCI_RAW, &hdev->flags);
1177 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1178 set_bit(HCI_RAW, &hdev->flags);
1180 if (!test_bit(HCI_RAW, &hdev->flags))
1181 ret = __hci_init(hdev);
1184 clear_bit(HCI_INIT, &hdev->flags);
1186 if (!ret) {
1187 hci_dev_hold(hdev);
1188 set_bit(HCI_UP, &hdev->flags);
1189 hci_notify(hdev, HCI_DEV_UP);
1190 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1191 mgmt_valid_hdev(hdev)) {
1192 hci_dev_lock(hdev);
1193 mgmt_powered(hdev, 1);
1194 hci_dev_unlock(hdev);
1196 } else {
1197 /* Init failed, cleanup */
1198 flush_work(&hdev->tx_work);
1199 flush_work(&hdev->cmd_work);
1200 flush_work(&hdev->rx_work);
1202 skb_queue_purge(&hdev->cmd_q);
1203 skb_queue_purge(&hdev->rx_q);
1205 if (hdev->flush)
1206 hdev->flush(hdev);
1208 if (hdev->sent_cmd) {
1209 kfree_skb(hdev->sent_cmd);
1210 hdev->sent_cmd = NULL;
1213 hdev->close(hdev);
1214 hdev->flags = 0;
1217 done:
1218 hci_req_unlock(hdev);
1219 hci_dev_put(hdev);
1220 return ret;
1223 static int hci_dev_do_close(struct hci_dev *hdev)
1225 BT_DBG("%s %p", hdev->name, hdev);
1227 cancel_delayed_work(&hdev->power_off);
1229 hci_req_cancel(hdev, ENODEV);
1230 hci_req_lock(hdev);
1232 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1233 del_timer_sync(&hdev->cmd_timer);
1234 hci_req_unlock(hdev);
1235 return 0;
1238 /* Flush RX and TX works */
1239 flush_work(&hdev->tx_work);
1240 flush_work(&hdev->rx_work);
1242 if (hdev->discov_timeout > 0) {
1243 cancel_delayed_work(&hdev->discov_off);
1244 hdev->discov_timeout = 0;
1245 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1248 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1249 cancel_delayed_work(&hdev->service_cache);
1251 cancel_delayed_work_sync(&hdev->le_scan_disable);
1253 hci_dev_lock(hdev);
1254 hci_inquiry_cache_flush(hdev);
1255 hci_conn_hash_flush(hdev);
1256 hci_dev_unlock(hdev);
1258 hci_notify(hdev, HCI_DEV_DOWN);
1260 if (hdev->flush)
1261 hdev->flush(hdev);
1263 /* Reset device */
1264 skb_queue_purge(&hdev->cmd_q);
1265 atomic_set(&hdev->cmd_cnt, 1);
1266 if (!test_bit(HCI_RAW, &hdev->flags) &&
1267 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1268 set_bit(HCI_INIT, &hdev->flags);
1269 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1270 clear_bit(HCI_INIT, &hdev->flags);
1273 /* flush cmd work */
1274 flush_work(&hdev->cmd_work);
1276 /* Drop queues */
1277 skb_queue_purge(&hdev->rx_q);
1278 skb_queue_purge(&hdev->cmd_q);
1279 skb_queue_purge(&hdev->raw_q);
1281 /* Drop last sent command */
1282 if (hdev->sent_cmd) {
1283 del_timer_sync(&hdev->cmd_timer);
1284 kfree_skb(hdev->sent_cmd);
1285 hdev->sent_cmd = NULL;
1288 kfree_skb(hdev->recv_evt);
1289 hdev->recv_evt = NULL;
1291 /* After this point our queues are empty
1292 * and no tasks are scheduled. */
1293 hdev->close(hdev);
1295 /* Clear flags */
1296 hdev->flags = 0;
1297 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1299 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1300 mgmt_valid_hdev(hdev)) {
1301 hci_dev_lock(hdev);
1302 mgmt_powered(hdev, 0);
1303 hci_dev_unlock(hdev);
1306 /* Controller radio is available but is currently powered down */
1307 hdev->amp_status = 0;
1309 memset(hdev->eir, 0, sizeof(hdev->eir));
1310 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1312 hci_req_unlock(hdev);
1314 hci_dev_put(hdev);
1315 return 0;
1318 int hci_dev_close(__u16 dev)
1320 struct hci_dev *hdev;
1321 int err;
1323 hdev = hci_dev_get(dev);
1324 if (!hdev)
1325 return -ENODEV;
1327 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1328 cancel_delayed_work(&hdev->power_off);
1330 err = hci_dev_do_close(hdev);
1332 hci_dev_put(hdev);
1333 return err;
1336 int hci_dev_reset(__u16 dev)
1338 struct hci_dev *hdev;
1339 int ret = 0;
1341 hdev = hci_dev_get(dev);
1342 if (!hdev)
1343 return -ENODEV;
1345 hci_req_lock(hdev);
1347 if (!test_bit(HCI_UP, &hdev->flags))
1348 goto done;
1350 /* Drop queues */
1351 skb_queue_purge(&hdev->rx_q);
1352 skb_queue_purge(&hdev->cmd_q);
1354 hci_dev_lock(hdev);
1355 hci_inquiry_cache_flush(hdev);
1356 hci_conn_hash_flush(hdev);
1357 hci_dev_unlock(hdev);
1359 if (hdev->flush)
1360 hdev->flush(hdev);
1362 atomic_set(&hdev->cmd_cnt, 1);
1363 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1365 if (!test_bit(HCI_RAW, &hdev->flags))
1366 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1368 done:
1369 hci_req_unlock(hdev);
1370 hci_dev_put(hdev);
1371 return ret;
1374 int hci_dev_reset_stat(__u16 dev)
1376 struct hci_dev *hdev;
1377 int ret = 0;
1379 hdev = hci_dev_get(dev);
1380 if (!hdev)
1381 return -ENODEV;
1383 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1385 hci_dev_put(hdev);
1387 return ret;
1390 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1392 struct hci_dev *hdev;
1393 struct hci_dev_req dr;
1394 int err = 0;
1396 if (copy_from_user(&dr, arg, sizeof(dr)))
1397 return -EFAULT;
1399 hdev = hci_dev_get(dr.dev_id);
1400 if (!hdev)
1401 return -ENODEV;
1403 switch (cmd) {
1404 case HCISETAUTH:
1405 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1406 HCI_INIT_TIMEOUT);
1407 break;
1409 case HCISETENCRYPT:
1410 if (!lmp_encrypt_capable(hdev)) {
1411 err = -EOPNOTSUPP;
1412 break;
1415 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1416 /* Auth must be enabled first */
1417 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1418 HCI_INIT_TIMEOUT);
1419 if (err)
1420 break;
1423 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1424 HCI_INIT_TIMEOUT);
1425 break;
1427 case HCISETSCAN:
1428 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1429 HCI_INIT_TIMEOUT);
1430 break;
1432 case HCISETLINKPOL:
1433 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1434 HCI_INIT_TIMEOUT);
1435 break;
1437 case HCISETLINKMODE:
1438 hdev->link_mode = ((__u16) dr.dev_opt) &
1439 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1440 break;
1442 case HCISETPTYPE:
1443 hdev->pkt_type = (__u16) dr.dev_opt;
1444 break;
1446 case HCISETACLMTU:
1447 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1448 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1449 break;
1451 case HCISETSCOMTU:
1452 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1453 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1454 break;
1456 default:
1457 err = -EINVAL;
1458 break;
1461 hci_dev_put(hdev);
1462 return err;
1465 int hci_get_dev_list(void __user *arg)
1467 struct hci_dev *hdev;
1468 struct hci_dev_list_req *dl;
1469 struct hci_dev_req *dr;
1470 int n = 0, size, err;
1471 __u16 dev_num;
1473 if (get_user(dev_num, (__u16 __user *) arg))
1474 return -EFAULT;
1476 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1477 return -EINVAL;
1479 size = sizeof(*dl) + dev_num * sizeof(*dr);
1481 dl = kzalloc(size, GFP_KERNEL);
1482 if (!dl)
1483 return -ENOMEM;
1485 dr = dl->dev_req;
1487 read_lock(&hci_dev_list_lock);
1488 list_for_each_entry(hdev, &hci_dev_list, list) {
1489 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1490 cancel_delayed_work(&hdev->power_off);
1492 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1493 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1495 (dr + n)->dev_id = hdev->id;
1496 (dr + n)->dev_opt = hdev->flags;
1498 if (++n >= dev_num)
1499 break;
1501 read_unlock(&hci_dev_list_lock);
1503 dl->dev_num = n;
1504 size = sizeof(*dl) + n * sizeof(*dr);
1506 err = copy_to_user(arg, dl, size);
1507 kfree(dl);
1509 return err ? -EFAULT : 0;
1512 int hci_get_dev_info(void __user *arg)
1514 struct hci_dev *hdev;
1515 struct hci_dev_info di;
1516 int err = 0;
1518 if (copy_from_user(&di, arg, sizeof(di)))
1519 return -EFAULT;
1521 hdev = hci_dev_get(di.dev_id);
1522 if (!hdev)
1523 return -ENODEV;
1525 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1526 cancel_delayed_work_sync(&hdev->power_off);
1528 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1529 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1531 strcpy(di.name, hdev->name);
1532 di.bdaddr = hdev->bdaddr;
1533 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1534 di.flags = hdev->flags;
1535 di.pkt_type = hdev->pkt_type;
1536 if (lmp_bredr_capable(hdev)) {
1537 di.acl_mtu = hdev->acl_mtu;
1538 di.acl_pkts = hdev->acl_pkts;
1539 di.sco_mtu = hdev->sco_mtu;
1540 di.sco_pkts = hdev->sco_pkts;
1541 } else {
1542 di.acl_mtu = hdev->le_mtu;
1543 di.acl_pkts = hdev->le_pkts;
1544 di.sco_mtu = 0;
1545 di.sco_pkts = 0;
1547 di.link_policy = hdev->link_policy;
1548 di.link_mode = hdev->link_mode;
1550 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1551 memcpy(&di.features, &hdev->features, sizeof(di.features));
1553 if (copy_to_user(arg, &di, sizeof(di)))
1554 err = -EFAULT;
1556 hci_dev_put(hdev);
1558 return err;
1561 /* ---- Interface to HCI drivers ---- */
1563 static int hci_rfkill_set_block(void *data, bool blocked)
1565 struct hci_dev *hdev = data;
1567 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1569 if (!blocked)
1570 return 0;
1572 hci_dev_do_close(hdev);
1574 return 0;
1577 static const struct rfkill_ops hci_rfkill_ops = {
1578 .set_block = hci_rfkill_set_block,
1581 static void hci_power_on(struct work_struct *work)
1583 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1584 int err;
1586 BT_DBG("%s", hdev->name);
1588 err = hci_dev_open(hdev->id);
1589 if (err < 0) {
1590 mgmt_set_powered_failed(hdev, err);
1591 return;
1594 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1595 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1596 HCI_AUTO_OFF_TIMEOUT);
1598 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1599 mgmt_index_added(hdev);
1602 static void hci_power_off(struct work_struct *work)
1604 struct hci_dev *hdev = container_of(work, struct hci_dev,
1605 power_off.work);
1607 BT_DBG("%s", hdev->name);
1609 hci_dev_do_close(hdev);
1612 static void hci_discov_off(struct work_struct *work)
1614 struct hci_dev *hdev;
1615 u8 scan = SCAN_PAGE;
1617 hdev = container_of(work, struct hci_dev, discov_off.work);
1619 BT_DBG("%s", hdev->name);
1621 hci_dev_lock(hdev);
1623 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1625 hdev->discov_timeout = 0;
1627 hci_dev_unlock(hdev);
1630 int hci_uuids_clear(struct hci_dev *hdev)
1632 struct bt_uuid *uuid, *tmp;
1634 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1635 list_del(&uuid->list);
1636 kfree(uuid);
1639 return 0;
1642 int hci_link_keys_clear(struct hci_dev *hdev)
1644 struct list_head *p, *n;
1646 list_for_each_safe(p, n, &hdev->link_keys) {
1647 struct link_key *key;
1649 key = list_entry(p, struct link_key, list);
1651 list_del(p);
1652 kfree(key);
1655 return 0;
1658 int hci_smp_ltks_clear(struct hci_dev *hdev)
1660 struct smp_ltk *k, *tmp;
1662 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1663 list_del(&k->list);
1664 kfree(k);
1667 return 0;
1670 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1672 struct link_key *k;
1674 list_for_each_entry(k, &hdev->link_keys, list)
1675 if (bacmp(bdaddr, &k->bdaddr) == 0)
1676 return k;
1678 return NULL;
1681 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1682 u8 key_type, u8 old_key_type)
1684 /* Legacy key */
1685 if (key_type < 0x03)
1686 return true;
1688 /* Debug keys are insecure so don't store them persistently */
1689 if (key_type == HCI_LK_DEBUG_COMBINATION)
1690 return false;
1692 /* Changed combination key and there's no previous one */
1693 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1694 return false;
1696 /* Security mode 3 case */
1697 if (!conn)
1698 return true;
1700 /* Neither local nor remote side had no-bonding as requirement */
1701 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1702 return true;
1704 /* Local side had dedicated bonding as requirement */
1705 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1706 return true;
1708 /* Remote side had dedicated bonding as requirement */
1709 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1710 return true;
1712 /* If none of the above criteria match, then don't store the key
1713 * persistently */
1714 return false;
1717 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1719 struct smp_ltk *k;
1721 list_for_each_entry(k, &hdev->long_term_keys, list) {
1722 if (k->ediv != ediv ||
1723 memcmp(rand, k->rand, sizeof(k->rand)))
1724 continue;
1726 return k;
1729 return NULL;
1732 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1733 u8 addr_type)
1735 struct smp_ltk *k;
1737 list_for_each_entry(k, &hdev->long_term_keys, list)
1738 if (addr_type == k->bdaddr_type &&
1739 bacmp(bdaddr, &k->bdaddr) == 0)
1740 return k;
1742 return NULL;
1745 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1746 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1748 struct link_key *key, *old_key;
1749 u8 old_key_type;
1750 bool persistent;
1752 old_key = hci_find_link_key(hdev, bdaddr);
1753 if (old_key) {
1754 old_key_type = old_key->type;
1755 key = old_key;
1756 } else {
1757 old_key_type = conn ? conn->key_type : 0xff;
1758 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1759 if (!key)
1760 return -ENOMEM;
1761 list_add(&key->list, &hdev->link_keys);
1764 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1766 /* Some buggy controller combinations generate a changed
1767 * combination key for legacy pairing even when there's no
1768 * previous key */
1769 if (type == HCI_LK_CHANGED_COMBINATION &&
1770 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1771 type = HCI_LK_COMBINATION;
1772 if (conn)
1773 conn->key_type = type;
1776 bacpy(&key->bdaddr, bdaddr);
1777 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1778 key->pin_len = pin_len;
1780 if (type == HCI_LK_CHANGED_COMBINATION)
1781 key->type = old_key_type;
1782 else
1783 key->type = type;
1785 if (!new_key)
1786 return 0;
1788 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1790 mgmt_new_link_key(hdev, key, persistent);
1792 if (conn)
1793 conn->flush_key = !persistent;
1795 return 0;
1798 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1799 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1800 ediv, u8 rand[8])
1802 struct smp_ltk *key, *old_key;
1804 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1805 return 0;
1807 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1808 if (old_key)
1809 key = old_key;
1810 else {
1811 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1812 if (!key)
1813 return -ENOMEM;
1814 list_add(&key->list, &hdev->long_term_keys);
1817 bacpy(&key->bdaddr, bdaddr);
1818 key->bdaddr_type = addr_type;
1819 memcpy(key->val, tk, sizeof(key->val));
1820 key->authenticated = authenticated;
1821 key->ediv = ediv;
1822 key->enc_size = enc_size;
1823 key->type = type;
1824 memcpy(key->rand, rand, sizeof(key->rand));
1826 if (!new_key)
1827 return 0;
1829 if (type & HCI_SMP_LTK)
1830 mgmt_new_ltk(hdev, key, 1);
1832 return 0;
1835 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1837 struct link_key *key;
1839 key = hci_find_link_key(hdev, bdaddr);
1840 if (!key)
1841 return -ENOENT;
1843 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1845 list_del(&key->list);
1846 kfree(key);
1848 return 0;
1851 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1853 struct smp_ltk *k, *tmp;
1855 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1856 if (bacmp(bdaddr, &k->bdaddr))
1857 continue;
1859 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1861 list_del(&k->list);
1862 kfree(k);
1865 return 0;
1868 /* HCI command timer function */
1869 static void hci_cmd_timeout(unsigned long arg)
1871 struct hci_dev *hdev = (void *) arg;
1873 if (hdev->sent_cmd) {
1874 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1875 u16 opcode = __le16_to_cpu(sent->opcode);
1877 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1878 } else {
1879 BT_ERR("%s command tx timeout", hdev->name);
1882 atomic_set(&hdev->cmd_cnt, 1);
1883 queue_work(hdev->workqueue, &hdev->cmd_work);
1886 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1887 bdaddr_t *bdaddr)
1889 struct oob_data *data;
1891 list_for_each_entry(data, &hdev->remote_oob_data, list)
1892 if (bacmp(bdaddr, &data->bdaddr) == 0)
1893 return data;
1895 return NULL;
1898 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1900 struct oob_data *data;
1902 data = hci_find_remote_oob_data(hdev, bdaddr);
1903 if (!data)
1904 return -ENOENT;
1906 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1908 list_del(&data->list);
1909 kfree(data);
1911 return 0;
1914 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1916 struct oob_data *data, *n;
1918 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1919 list_del(&data->list);
1920 kfree(data);
1923 return 0;
1926 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1927 u8 *randomizer)
1929 struct oob_data *data;
1931 data = hci_find_remote_oob_data(hdev, bdaddr);
1933 if (!data) {
1934 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1935 if (!data)
1936 return -ENOMEM;
1938 bacpy(&data->bdaddr, bdaddr);
1939 list_add(&data->list, &hdev->remote_oob_data);
1942 memcpy(data->hash, hash, sizeof(data->hash));
1943 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1945 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1947 return 0;
1950 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1952 struct bdaddr_list *b;
1954 list_for_each_entry(b, &hdev->blacklist, list)
1955 if (bacmp(bdaddr, &b->bdaddr) == 0)
1956 return b;
1958 return NULL;
1961 int hci_blacklist_clear(struct hci_dev *hdev)
1963 struct list_head *p, *n;
1965 list_for_each_safe(p, n, &hdev->blacklist) {
1966 struct bdaddr_list *b;
1968 b = list_entry(p, struct bdaddr_list, list);
1970 list_del(p);
1971 kfree(b);
1974 return 0;
1977 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1979 struct bdaddr_list *entry;
1981 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1982 return -EBADF;
1984 if (hci_blacklist_lookup(hdev, bdaddr))
1985 return -EEXIST;
1987 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1988 if (!entry)
1989 return -ENOMEM;
1991 bacpy(&entry->bdaddr, bdaddr);
1993 list_add(&entry->list, &hdev->blacklist);
1995 return mgmt_device_blocked(hdev, bdaddr, type);
1998 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2000 struct bdaddr_list *entry;
2002 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2003 return hci_blacklist_clear(hdev);
2005 entry = hci_blacklist_lookup(hdev, bdaddr);
2006 if (!entry)
2007 return -ENOENT;
2009 list_del(&entry->list);
2010 kfree(entry);
2012 return mgmt_device_unblocked(hdev, bdaddr, type);
2015 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2017 if (status) {
2018 BT_ERR("Failed to start inquiry: status %d", status);
2020 hci_dev_lock(hdev);
2021 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2022 hci_dev_unlock(hdev);
2023 return;
2027 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2029 /* General inquiry access code (GIAC) */
2030 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2031 struct hci_request req;
2032 struct hci_cp_inquiry cp;
2033 int err;
2035 if (status) {
2036 BT_ERR("Failed to disable LE scanning: status %d", status);
2037 return;
2040 switch (hdev->discovery.type) {
2041 case DISCOV_TYPE_LE:
2042 hci_dev_lock(hdev);
2043 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2044 hci_dev_unlock(hdev);
2045 break;
2047 case DISCOV_TYPE_INTERLEAVED:
2048 hci_req_init(&req, hdev);
2050 memset(&cp, 0, sizeof(cp));
2051 memcpy(&cp.lap, lap, sizeof(cp.lap));
2052 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2053 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2055 hci_dev_lock(hdev);
2057 hci_inquiry_cache_flush(hdev);
2059 err = hci_req_run(&req, inquiry_complete);
2060 if (err) {
2061 BT_ERR("Inquiry request failed: err %d", err);
2062 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2065 hci_dev_unlock(hdev);
2066 break;
2070 static void le_scan_disable_work(struct work_struct *work)
2072 struct hci_dev *hdev = container_of(work, struct hci_dev,
2073 le_scan_disable.work);
2074 struct hci_cp_le_set_scan_enable cp;
2075 struct hci_request req;
2076 int err;
2078 BT_DBG("%s", hdev->name);
2080 hci_req_init(&req, hdev);
2082 memset(&cp, 0, sizeof(cp));
2083 cp.enable = LE_SCAN_DISABLE;
2084 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2086 err = hci_req_run(&req, le_scan_disable_work_complete);
2087 if (err)
2088 BT_ERR("Disable LE scanning request failed: err %d", err);
2091 /* Alloc HCI device */
2092 struct hci_dev *hci_alloc_dev(void)
2094 struct hci_dev *hdev;
2096 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2097 if (!hdev)
2098 return NULL;
2100 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2101 hdev->esco_type = (ESCO_HV1);
2102 hdev->link_mode = (HCI_LM_ACCEPT);
2103 hdev->io_capability = 0x03; /* No Input No Output */
2104 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2105 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2107 hdev->sniff_max_interval = 800;
2108 hdev->sniff_min_interval = 80;
2110 mutex_init(&hdev->lock);
2111 mutex_init(&hdev->req_lock);
2113 INIT_LIST_HEAD(&hdev->mgmt_pending);
2114 INIT_LIST_HEAD(&hdev->blacklist);
2115 INIT_LIST_HEAD(&hdev->uuids);
2116 INIT_LIST_HEAD(&hdev->link_keys);
2117 INIT_LIST_HEAD(&hdev->long_term_keys);
2118 INIT_LIST_HEAD(&hdev->remote_oob_data);
2119 INIT_LIST_HEAD(&hdev->conn_hash.list);
2121 INIT_WORK(&hdev->rx_work, hci_rx_work);
2122 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2123 INIT_WORK(&hdev->tx_work, hci_tx_work);
2124 INIT_WORK(&hdev->power_on, hci_power_on);
2126 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2127 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2128 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2130 skb_queue_head_init(&hdev->rx_q);
2131 skb_queue_head_init(&hdev->cmd_q);
2132 skb_queue_head_init(&hdev->raw_q);
2134 init_waitqueue_head(&hdev->req_wait_q);
2136 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2138 hci_init_sysfs(hdev);
2139 discovery_init(hdev);
2141 return hdev;
2143 EXPORT_SYMBOL(hci_alloc_dev);
2145 /* Free HCI device */
2146 void hci_free_dev(struct hci_dev *hdev)
2148 /* will free via device release */
2149 put_device(&hdev->dev);
2151 EXPORT_SYMBOL(hci_free_dev);
2153 /* Register HCI device */
2154 int hci_register_dev(struct hci_dev *hdev)
2156 int id, error;
2158 if (!hdev->open || !hdev->close)
2159 return -EINVAL;
2161 /* Do not allow HCI_AMP devices to register at index 0,
2162 * so the index can be used as the AMP controller ID.
2164 switch (hdev->dev_type) {
2165 case HCI_BREDR:
2166 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2167 break;
2168 case HCI_AMP:
2169 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2170 break;
2171 default:
2172 return -EINVAL;
2175 if (id < 0)
2176 return id;
2178 sprintf(hdev->name, "hci%d", id);
2179 hdev->id = id;
2181 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2183 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2184 WQ_MEM_RECLAIM, 1, hdev->name);
2185 if (!hdev->workqueue) {
2186 error = -ENOMEM;
2187 goto err;
2190 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2191 WQ_MEM_RECLAIM, 1, hdev->name);
2192 if (!hdev->req_workqueue) {
2193 destroy_workqueue(hdev->workqueue);
2194 error = -ENOMEM;
2195 goto err;
2198 error = hci_add_sysfs(hdev);
2199 if (error < 0)
2200 goto err_wqueue;
2202 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2203 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2204 hdev);
2205 if (hdev->rfkill) {
2206 if (rfkill_register(hdev->rfkill) < 0) {
2207 rfkill_destroy(hdev->rfkill);
2208 hdev->rfkill = NULL;
2212 set_bit(HCI_SETUP, &hdev->dev_flags);
2214 if (hdev->dev_type != HCI_AMP)
2215 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2217 write_lock(&hci_dev_list_lock);
2218 list_add(&hdev->list, &hci_dev_list);
2219 write_unlock(&hci_dev_list_lock);
2221 hci_notify(hdev, HCI_DEV_REG);
2222 hci_dev_hold(hdev);
2224 queue_work(hdev->req_workqueue, &hdev->power_on);
2226 return id;
2228 err_wqueue:
2229 destroy_workqueue(hdev->workqueue);
2230 destroy_workqueue(hdev->req_workqueue);
2231 err:
2232 ida_simple_remove(&hci_index_ida, hdev->id);
2234 return error;
2236 EXPORT_SYMBOL(hci_register_dev);
2238 /* Unregister HCI device */
2239 void hci_unregister_dev(struct hci_dev *hdev)
2241 int i, id;
2243 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2245 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2247 id = hdev->id;
2249 write_lock(&hci_dev_list_lock);
2250 list_del(&hdev->list);
2251 write_unlock(&hci_dev_list_lock);
2253 hci_dev_do_close(hdev);
2255 for (i = 0; i < NUM_REASSEMBLY; i++)
2256 kfree_skb(hdev->reassembly[i]);
2258 cancel_work_sync(&hdev->power_on);
2260 if (!test_bit(HCI_INIT, &hdev->flags) &&
2261 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2262 hci_dev_lock(hdev);
2263 mgmt_index_removed(hdev);
2264 hci_dev_unlock(hdev);
2267 /* mgmt_index_removed should take care of emptying the
2268 * pending list */
2269 BUG_ON(!list_empty(&hdev->mgmt_pending));
2271 hci_notify(hdev, HCI_DEV_UNREG);
2273 if (hdev->rfkill) {
2274 rfkill_unregister(hdev->rfkill);
2275 rfkill_destroy(hdev->rfkill);
2278 hci_del_sysfs(hdev);
2280 destroy_workqueue(hdev->workqueue);
2281 destroy_workqueue(hdev->req_workqueue);
2283 hci_dev_lock(hdev);
2284 hci_blacklist_clear(hdev);
2285 hci_uuids_clear(hdev);
2286 hci_link_keys_clear(hdev);
2287 hci_smp_ltks_clear(hdev);
2288 hci_remote_oob_data_clear(hdev);
2289 hci_dev_unlock(hdev);
2291 hci_dev_put(hdev);
2293 ida_simple_remove(&hci_index_ida, id);
2295 EXPORT_SYMBOL(hci_unregister_dev);
2297 /* Suspend HCI device */
2298 int hci_suspend_dev(struct hci_dev *hdev)
2300 hci_notify(hdev, HCI_DEV_SUSPEND);
2301 return 0;
2303 EXPORT_SYMBOL(hci_suspend_dev);
2305 /* Resume HCI device */
2306 int hci_resume_dev(struct hci_dev *hdev)
2308 hci_notify(hdev, HCI_DEV_RESUME);
2309 return 0;
2311 EXPORT_SYMBOL(hci_resume_dev);
2313 /* Receive frame from HCI drivers */
2314 int hci_recv_frame(struct sk_buff *skb)
2316 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2317 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2318 && !test_bit(HCI_INIT, &hdev->flags))) {
2319 kfree_skb(skb);
2320 return -ENXIO;
2323 /* Incoming skb */
2324 bt_cb(skb)->incoming = 1;
2326 /* Time stamp */
2327 __net_timestamp(skb);
2329 skb_queue_tail(&hdev->rx_q, skb);
2330 queue_work(hdev->workqueue, &hdev->rx_work);
2332 return 0;
2334 EXPORT_SYMBOL(hci_recv_frame);
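/* Illustrative sketch (added): a driver handing a complete frame to the
 * core. In this version of the API the owning hci_dev travels in skb->dev
 * and the packet type in bt_cb(skb)->pkt_type. The function name and the
 * event-only handling are assumptions for illustration.
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}
#endif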
2336 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2337 int count, __u8 index)
2339 int len = 0;
2340 int hlen = 0;
2341 int remain = count;
2342 struct sk_buff *skb;
2343 struct bt_skb_cb *scb;
2345 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2346 index >= NUM_REASSEMBLY)
2347 return -EILSEQ;
2349 skb = hdev->reassembly[index];
2351 if (!skb) {
2352 switch (type) {
2353 case HCI_ACLDATA_PKT:
2354 len = HCI_MAX_FRAME_SIZE;
2355 hlen = HCI_ACL_HDR_SIZE;
2356 break;
2357 case HCI_EVENT_PKT:
2358 len = HCI_MAX_EVENT_SIZE;
2359 hlen = HCI_EVENT_HDR_SIZE;
2360 break;
2361 case HCI_SCODATA_PKT:
2362 len = HCI_MAX_SCO_SIZE;
2363 hlen = HCI_SCO_HDR_SIZE;
2364 break;
2367 skb = bt_skb_alloc(len, GFP_ATOMIC);
2368 if (!skb)
2369 return -ENOMEM;
2371 scb = (void *) skb->cb;
2372 scb->expect = hlen;
2373 scb->pkt_type = type;
2375 skb->dev = (void *) hdev;
2376 hdev->reassembly[index] = skb;
2379 while (count) {
2380 scb = (void *) skb->cb;
2381 len = min_t(uint, scb->expect, count);
2383 memcpy(skb_put(skb, len), data, len);
2385 count -= len;
2386 data += len;
2387 scb->expect -= len;
2388 remain = count;
2390 switch (type) {
2391 case HCI_EVENT_PKT:
2392 if (skb->len == HCI_EVENT_HDR_SIZE) {
2393 struct hci_event_hdr *h = hci_event_hdr(skb);
2394 scb->expect = h->plen;
2396 if (skb_tailroom(skb) < scb->expect) {
2397 kfree_skb(skb);
2398 hdev->reassembly[index] = NULL;
2399 return -ENOMEM;
2402 break;
2404 case HCI_ACLDATA_PKT:
2405 if (skb->len == HCI_ACL_HDR_SIZE) {
2406 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2407 scb->expect = __le16_to_cpu(h->dlen);
2409 if (skb_tailroom(skb) < scb->expect) {
2410 kfree_skb(skb);
2411 hdev->reassembly[index] = NULL;
2412 return -ENOMEM;
2415 break;
2417 case HCI_SCODATA_PKT:
2418 if (skb->len == HCI_SCO_HDR_SIZE) {
2419 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2420 scb->expect = h->dlen;
2422 if (skb_tailroom(skb) < scb->expect) {
2423 kfree_skb(skb);
2424 hdev->reassembly[index] = NULL;
2425 return -ENOMEM;
2428 break;
2431 if (scb->expect == 0) {
2432 /* Complete frame */
2434 bt_cb(skb)->pkt_type = type;
2435 hci_recv_frame(skb);
2437 hdev->reassembly[index] = NULL;
2438 return remain;
2442 return remain;
2445 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2447 int rem = 0;
2449 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2450 return -EILSEQ;
2452 while (count) {
2453 rem = hci_reassembly(hdev, type, data, count, type - 1);
2454 if (rem < 0)
2455 return rem;
2457 data += (count - rem);
2458 count = rem;
2461 return rem;
2463 EXPORT_SYMBOL(hci_recv_fragment);
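/* Illustrative sketch, not part of the original file: a hypothetical driver
 * whose hardware delivers one packet type at a time, possibly split across
 * several buffers, can feed each chunk to hci_recv_fragment() and let the
 * core reassemble the frame. The "example_" name is made up.
 */
static void example_rx_chunk(struct hci_dev *hdev, int pkt_type,
			     void *buf, int len)
{
	/* A negative return means the chunk was rejected (bad type or
	 * allocation failure); a real driver would drop and resynchronize.
	 */
	if (hci_recv_fragment(hdev, pkt_type, buf, len) < 0)
		BT_ERR("%s corrupted packet of type %d", hdev->name, pkt_type);
}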
2465 #define STREAM_REASSEMBLY 0
2467 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2469 int type;
2470 int rem = 0;
2472 while (count) {
2473 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2475 if (!skb) {
2476 struct { char type; } *pkt;
2478 /* Start of the frame */
2479 pkt = data;
2480 type = pkt->type;
2482 data++;
2483 count--;
2484 } else
2485 type = bt_cb(skb)->pkt_type;
2487 rem = hci_reassembly(hdev, type, data, count,
2488 STREAM_REASSEMBLY);
2489 if (rem < 0)
2490 return rem;
2492 data += (count - rem);
2493 count = rem;
2496 return rem;
2498 EXPORT_SYMBOL(hci_recv_stream_fragment);
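/* Illustrative sketch, not part of the original file: a hypothetical
 * UART-style driver that only sees a raw byte stream (packet type
 * indicator followed by the packet) can push the received bytes straight
 * into hci_recv_stream_fragment(). The "example_" name is made up.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	/* The core peels off the leading type byte and reassembles the
	 * frame in hdev->reassembly[STREAM_REASSEMBLY].
	 */
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}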
2500 /* ---- Interface to upper protocols ---- */
2502 int hci_register_cb(struct hci_cb *cb)
2504 BT_DBG("%p name %s", cb, cb->name);
2506 write_lock(&hci_cb_list_lock);
2507 list_add(&cb->list, &hci_cb_list);
2508 write_unlock(&hci_cb_list_lock);
2510 return 0;
2512 EXPORT_SYMBOL(hci_register_cb);
2514 int hci_unregister_cb(struct hci_cb *cb)
2516 BT_DBG("%p name %s", cb, cb->name);
2518 write_lock(&hci_cb_list_lock);
2519 list_del(&cb->list);
2520 write_unlock(&hci_cb_list_lock);
2522 return 0;
2524 EXPORT_SYMBOL(hci_unregister_cb);
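/* Illustrative sketch, not part of the original file: an upper-layer
 * protocol module registers a struct hci_cb so the core can notify it of
 * connection events. Only the .name member is shown; the callback members
 * are omitted for brevity, and the "example_" names are made up.
 */
static struct hci_cb example_cb = {
	.name = "example_proto",
};

static int __init example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}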
2526 static int hci_send_frame(struct sk_buff *skb)
2528 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2530 if (!hdev) {
2531 kfree_skb(skb);
2532 return -ENODEV;
2535 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2537 /* Time stamp */
2538 __net_timestamp(skb);
2540 /* Send copy to monitor */
2541 hci_send_to_monitor(hdev, skb);
2543 if (atomic_read(&hdev->promisc)) {
2544 /* Send copy to the sockets */
2545 hci_send_to_sock(hdev, skb);
2548 /* Get rid of skb owner, prior to sending to the driver. */
2549 skb_orphan(skb);
2551 return hdev->send(skb);
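/* Illustrative sketch, not part of the original file: hdev->send is the
 * driver's transmit hook invoked above. A hypothetical H:4 UART transport
 * might prepend the one-byte packet type indicator before handing the
 * buffer to its hardware; the "example_" name is made up.
 */
static int example_send(struct sk_buff *skb)
{
	/* The packet type indicator goes on the wire first */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	/* ... pass skb->data / skb->len to the transport hardware here ... */

	kfree_skb(skb);
	return 0;
}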
2554 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2556 skb_queue_head_init(&req->cmd_q);
2557 req->hdev = hdev;
2558 req->err = 0;
2561 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2563 struct hci_dev *hdev = req->hdev;
2564 struct sk_buff *skb;
2565 unsigned long flags;
2567 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2569 /* If an error occurred during request building, remove all HCI
2570 * commands queued on the HCI request queue.
2572 if (req->err) {
2573 skb_queue_purge(&req->cmd_q);
2574 return req->err;
2577 /* Do not allow empty requests */
2578 if (skb_queue_empty(&req->cmd_q))
2579 return -ENODATA;
2581 skb = skb_peek_tail(&req->cmd_q);
2582 bt_cb(skb)->req.complete = complete;
2584 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2585 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2586 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2588 queue_work(hdev->workqueue, &hdev->cmd_work);
2590 return 0;
2593 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2594 u32 plen, const void *param)
2596 int len = HCI_COMMAND_HDR_SIZE + plen;
2597 struct hci_command_hdr *hdr;
2598 struct sk_buff *skb;
2600 skb = bt_skb_alloc(len, GFP_ATOMIC);
2601 if (!skb)
2602 return NULL;
2604 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2605 hdr->opcode = cpu_to_le16(opcode);
2606 hdr->plen = plen;
2608 if (plen)
2609 memcpy(skb_put(skb, plen), param, plen);
2611 BT_DBG("skb len %d", skb->len);
2613 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2614 skb->dev = (void *) hdev;
2616 return skb;
2619 /* Send HCI command */
2620 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2621 const void *param)
2623 struct sk_buff *skb;
2625 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2627 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2628 if (!skb) {
2629 BT_ERR("%s no memory for command", hdev->name);
2630 return -ENOMEM;
2633 /* Stand-alone HCI commands must be flagged as
2634 * single-command requests.
2636 bt_cb(skb)->req.start = true;
2638 skb_queue_tail(&hdev->cmd_q, skb);
2639 queue_work(hdev->workqueue, &hdev->cmd_work);
2641 return 0;
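/* Illustrative sketch, not part of the original file: sending a
 * stand-alone, parameterless command. The controller's reply arrives
 * asynchronously through the event path; hci_send_cmd() only queues the
 * command. The "example_" name is made up.
 */
static int example_query_version(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}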
2644 /* Queue a command to an asynchronous HCI request */
2645 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2646 const void *param, u8 event)
2648 struct hci_dev *hdev = req->hdev;
2649 struct sk_buff *skb;
2651 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2653 /* If an error occurred during request building, there is no point in
2654 * queueing the HCI command. We can simply return.
2656 if (req->err)
2657 return;
2659 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2660 if (!skb) {
2661 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662 hdev->name, opcode);
2663 req->err = -ENOMEM;
2664 return;
2667 if (skb_queue_empty(&req->cmd_q))
2668 bt_cb(skb)->req.start = true;
2670 bt_cb(skb)->req.event = event;
2672 skb_queue_tail(&req->cmd_q, skb);
2675 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2676 const void *param)
2678 hci_req_add_ev(req, opcode, plen, param, 0);
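/* Illustrative sketch, not part of the original file: building and running
 * an asynchronous request of two commands. hci_req_run() splices them onto
 * hdev->cmd_q as one unit and arranges for the completion callback to be
 * invoked after the last command completes. The "example_" names are
 * made up.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, example_req_complete);
}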
2681 /* Get data from the previously sent command */
2682 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2684 struct hci_command_hdr *hdr;
2686 if (!hdev->sent_cmd)
2687 return NULL;
2689 hdr = (void *) hdev->sent_cmd->data;
2691 if (hdr->opcode != cpu_to_le16(opcode))
2692 return NULL;
2694 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2696 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2699 /* Send ACL data */
2700 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2702 struct hci_acl_hdr *hdr;
2703 int len = skb->len;
2705 skb_push(skb, HCI_ACL_HDR_SIZE);
2706 skb_reset_transport_header(skb);
2707 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2708 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2709 hdr->dlen = cpu_to_le16(len);
2712 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2713 struct sk_buff *skb, __u16 flags)
2715 struct hci_conn *conn = chan->conn;
2716 struct hci_dev *hdev = conn->hdev;
2717 struct sk_buff *list;
2719 skb->len = skb_headlen(skb);
2720 skb->data_len = 0;
2722 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2724 switch (hdev->dev_type) {
2725 case HCI_BREDR:
2726 hci_add_acl_hdr(skb, conn->handle, flags);
2727 break;
2728 case HCI_AMP:
2729 hci_add_acl_hdr(skb, chan->handle, flags);
2730 break;
2731 default:
2732 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2733 return;
2736 list = skb_shinfo(skb)->frag_list;
2737 if (!list) {
2738 /* Non-fragmented */
2739 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2741 skb_queue_tail(queue, skb);
2742 } else {
2743 /* Fragmented */
2744 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2746 skb_shinfo(skb)->frag_list = NULL;
2748 /* Queue all fragments atomically */
2749 spin_lock(&queue->lock);
2751 __skb_queue_tail(queue, skb);
2753 flags &= ~ACL_START;
2754 flags |= ACL_CONT;
2755 do {
2756 skb = list; list = list->next;
2758 skb->dev = (void *) hdev;
2759 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2760 hci_add_acl_hdr(skb, conn->handle, flags);
2762 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2764 __skb_queue_tail(queue, skb);
2765 } while (list);
2767 spin_unlock(&queue->lock);
2771 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2773 struct hci_dev *hdev = chan->conn->hdev;
2775 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2777 skb->dev = (void *) hdev;
2779 hci_queue_acl(chan, &chan->data_q, skb, flags);
2781 queue_work(hdev->workqueue, &hdev->tx_work);
2784 /* Send SCO data */
2785 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2787 struct hci_dev *hdev = conn->hdev;
2788 struct hci_sco_hdr hdr;
2790 BT_DBG("%s len %d", hdev->name, skb->len);
2792 hdr.handle = cpu_to_le16(conn->handle);
2793 hdr.dlen = skb->len;
2795 skb_push(skb, HCI_SCO_HDR_SIZE);
2796 skb_reset_transport_header(skb);
2797 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2799 skb->dev = (void *) hdev;
2800 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2802 skb_queue_tail(&conn->data_q, skb);
2803 queue_work(hdev->workqueue, &hdev->tx_work);
2806 /* ---- HCI TX task (outgoing data) ---- */
2808 /* HCI Connection scheduler */
2809 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2810 int *quote)
2812 struct hci_conn_hash *h = &hdev->conn_hash;
2813 struct hci_conn *conn = NULL, *c;
2814 unsigned int num = 0, min = ~0;
2816 /* We don't have to lock the device here. Connections are always
2817 * added and removed with TX task disabled. */
2819 rcu_read_lock();
2821 list_for_each_entry_rcu(c, &h->list, list) {
2822 if (c->type != type || skb_queue_empty(&c->data_q))
2823 continue;
2825 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2826 continue;
2828 num++;
2830 if (c->sent < min) {
2831 min = c->sent;
2832 conn = c;
2835 if (hci_conn_num(hdev, type) == num)
2836 break;
2839 rcu_read_unlock();
2841 if (conn) {
2842 int cnt, q;
2844 switch (conn->type) {
2845 case ACL_LINK:
2846 cnt = hdev->acl_cnt;
2847 break;
2848 case SCO_LINK:
2849 case ESCO_LINK:
2850 cnt = hdev->sco_cnt;
2851 break;
2852 case LE_LINK:
2853 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2854 break;
2855 default:
2856 cnt = 0;
2857 BT_ERR("Unknown link type");
2860 q = cnt / num;
2861 *quote = q ? q : 1;
2862 } else
2863 *quote = 0;
2865 BT_DBG("conn %p quote %d", conn, *quote);
2866 return conn;
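/* Worked example, not part of the original file: with 8 free ACL slots
 * (cnt = 8) shared by 3 eligible connections (num = 3), the connection
 * with the fewest packets in flight is picked and given a quote of
 * 8 / 3 = 2 packets; when the division rounds down to zero the quote is
 * clamped to 1 so the chosen connection always makes progress.
 */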
2869 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2871 struct hci_conn_hash *h = &hdev->conn_hash;
2872 struct hci_conn *c;
2874 BT_ERR("%s link tx timeout", hdev->name);
2876 rcu_read_lock();
2878 /* Kill stalled connections */
2879 list_for_each_entry_rcu(c, &h->list, list) {
2880 if (c->type == type && c->sent) {
2881 BT_ERR("%s killing stalled connection %pMR",
2882 hdev->name, &c->dst);
2883 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2887 rcu_read_unlock();
2890 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2891 int *quote)
2893 struct hci_conn_hash *h = &hdev->conn_hash;
2894 struct hci_chan *chan = NULL;
2895 unsigned int num = 0, min = ~0, cur_prio = 0;
2896 struct hci_conn *conn;
2897 int cnt, q, conn_num = 0;
2899 BT_DBG("%s", hdev->name);
2901 rcu_read_lock();
2903 list_for_each_entry_rcu(conn, &h->list, list) {
2904 struct hci_chan *tmp;
2906 if (conn->type != type)
2907 continue;
2909 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2910 continue;
2912 conn_num++;
2914 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2915 struct sk_buff *skb;
2917 if (skb_queue_empty(&tmp->data_q))
2918 continue;
2920 skb = skb_peek(&tmp->data_q);
2921 if (skb->priority < cur_prio)
2922 continue;
2924 if (skb->priority > cur_prio) {
2925 num = 0;
2926 min = ~0;
2927 cur_prio = skb->priority;
2930 num++;
2932 if (conn->sent < min) {
2933 min = conn->sent;
2934 chan = tmp;
2938 if (hci_conn_num(hdev, type) == conn_num)
2939 break;
2942 rcu_read_unlock();
2944 if (!chan)
2945 return NULL;
2947 switch (chan->conn->type) {
2948 case ACL_LINK:
2949 cnt = hdev->acl_cnt;
2950 break;
2951 case AMP_LINK:
2952 cnt = hdev->block_cnt;
2953 break;
2954 case SCO_LINK:
2955 case ESCO_LINK:
2956 cnt = hdev->sco_cnt;
2957 break;
2958 case LE_LINK:
2959 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2960 break;
2961 default:
2962 cnt = 0;
2963 BT_ERR("Unknown link type");
2966 q = cnt / num;
2967 *quote = q ? q : 1;
2968 BT_DBG("chan %p quote %d", chan, *quote);
2969 return chan;
2972 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2974 struct hci_conn_hash *h = &hdev->conn_hash;
2975 struct hci_conn *conn;
2976 int num = 0;
2978 BT_DBG("%s", hdev->name);
2980 rcu_read_lock();
2982 list_for_each_entry_rcu(conn, &h->list, list) {
2983 struct hci_chan *chan;
2985 if (conn->type != type)
2986 continue;
2988 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2989 continue;
2991 num++;
2993 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2994 struct sk_buff *skb;
2996 if (chan->sent) {
2997 chan->sent = 0;
2998 continue;
3001 if (skb_queue_empty(&chan->data_q))
3002 continue;
3004 skb = skb_peek(&chan->data_q);
3005 if (skb->priority >= HCI_PRIO_MAX - 1)
3006 continue;
3008 skb->priority = HCI_PRIO_MAX - 1;
3010 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3011 skb->priority);
3014 if (hci_conn_num(hdev, type) == num)
3015 break;
3018 rcu_read_unlock();
3022 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3024 /* Calculate count of blocks used by this packet */
3025 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
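/* Worked example, not part of the original file: with a controller block
 * size of 64 bytes (hdev->block_len = 64), an ACL frame whose payload is
 * 300 bytes after the 4-byte ACL header occupies
 * DIV_ROUND_UP(300, 64) = 5 buffer blocks.
 */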
3028 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3030 if (!test_bit(HCI_RAW, &hdev->flags)) {
3031 /* ACL tx timeout must be longer than maximum
3032 * link supervision timeout (40.9 seconds) */
3033 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3034 HCI_ACL_TX_TIMEOUT))
3035 hci_link_tx_to(hdev, ACL_LINK);
3039 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3041 unsigned int cnt = hdev->acl_cnt;
3042 struct hci_chan *chan;
3043 struct sk_buff *skb;
3044 int quote;
3046 __check_timeout(hdev, cnt);
3048 while (hdev->acl_cnt &&
3049 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3050 u32 priority = (skb_peek(&chan->data_q))->priority;
3051 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3052 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3053 skb->len, skb->priority);
3055 /* Stop if priority has changed */
3056 if (skb->priority < priority)
3057 break;
3059 skb = skb_dequeue(&chan->data_q);
3061 hci_conn_enter_active_mode(chan->conn,
3062 bt_cb(skb)->force_active);
3064 hci_send_frame(skb);
3065 hdev->acl_last_tx = jiffies;
3067 hdev->acl_cnt--;
3068 chan->sent++;
3069 chan->conn->sent++;
3073 if (cnt != hdev->acl_cnt)
3074 hci_prio_recalculate(hdev, ACL_LINK);
3077 static void hci_sched_acl_blk(struct hci_dev *hdev)
3079 unsigned int cnt = hdev->block_cnt;
3080 struct hci_chan *chan;
3081 struct sk_buff *skb;
3082 int quote;
3083 u8 type;
3085 __check_timeout(hdev, cnt);
3087 BT_DBG("%s", hdev->name);
3089 if (hdev->dev_type == HCI_AMP)
3090 type = AMP_LINK;
3091 else
3092 type = ACL_LINK;
3094 while (hdev->block_cnt > 0 &&
3095 (chan = hci_chan_sent(hdev, type, &quote))) {
3096 u32 priority = (skb_peek(&chan->data_q))->priority;
3097 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3098 int blocks;
3100 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3101 skb->len, skb->priority);
3103 /* Stop if priority has changed */
3104 if (skb->priority < priority)
3105 break;
3107 skb = skb_dequeue(&chan->data_q);
3109 blocks = __get_blocks(hdev, skb);
3110 if (blocks > hdev->block_cnt)
3111 return;
3113 hci_conn_enter_active_mode(chan->conn,
3114 bt_cb(skb)->force_active);
3116 hci_send_frame(skb);
3117 hdev->acl_last_tx = jiffies;
3119 hdev->block_cnt -= blocks;
3120 quote -= blocks;
3122 chan->sent += blocks;
3123 chan->conn->sent += blocks;
3127 if (cnt != hdev->block_cnt)
3128 hci_prio_recalculate(hdev, type);
3131 static void hci_sched_acl(struct hci_dev *hdev)
3133 BT_DBG("%s", hdev->name);
3135 /* No ACL link over BR/EDR controller */
3136 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3137 return;
3139 /* No AMP link over AMP controller */
3140 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3141 return;
3143 switch (hdev->flow_ctl_mode) {
3144 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3145 hci_sched_acl_pkt(hdev);
3146 break;
3148 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3149 hci_sched_acl_blk(hdev);
3150 break;
3154 /* Schedule SCO */
3155 static void hci_sched_sco(struct hci_dev *hdev)
3157 struct hci_conn *conn;
3158 struct sk_buff *skb;
3159 int quote;
3161 BT_DBG("%s", hdev->name);
3163 if (!hci_conn_num(hdev, SCO_LINK))
3164 return;
3166 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3167 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3168 BT_DBG("skb %p len %d", skb, skb->len);
3169 hci_send_frame(skb);
3171 conn->sent++;
3172 if (conn->sent == ~0)
3173 conn->sent = 0;
3178 static void hci_sched_esco(struct hci_dev *hdev)
3180 struct hci_conn *conn;
3181 struct sk_buff *skb;
3182 int quote;
3184 BT_DBG("%s", hdev->name);
3186 if (!hci_conn_num(hdev, ESCO_LINK))
3187 return;
3189 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3190 &quote))) {
3191 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3192 BT_DBG("skb %p len %d", skb, skb->len);
3193 hci_send_frame(skb);
3195 conn->sent++;
3196 if (conn->sent == ~0)
3197 conn->sent = 0;
3202 static void hci_sched_le(struct hci_dev *hdev)
3204 struct hci_chan *chan;
3205 struct sk_buff *skb;
3206 int quote, cnt, tmp;
3208 BT_DBG("%s", hdev->name);
3210 if (!hci_conn_num(hdev, LE_LINK))
3211 return;
3213 if (!test_bit(HCI_RAW, &hdev->flags)) {
3214 /* LE tx timeout must be longer than maximum
3215 * link supervision timeout (40.9 seconds) */
3216 if (!hdev->le_cnt && hdev->le_pkts &&
3217 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3218 hci_link_tx_to(hdev, LE_LINK);
3221 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3222 tmp = cnt;
3223 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3224 u32 priority = (skb_peek(&chan->data_q))->priority;
3225 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3226 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3227 skb->len, skb->priority);
3229 /* Stop if priority has changed */
3230 if (skb->priority < priority)
3231 break;
3233 skb = skb_dequeue(&chan->data_q);
3235 hci_send_frame(skb);
3236 hdev->le_last_tx = jiffies;
3238 cnt--;
3239 chan->sent++;
3240 chan->conn->sent++;
3244 if (hdev->le_pkts)
3245 hdev->le_cnt = cnt;
3246 else
3247 hdev->acl_cnt = cnt;
3249 if (cnt != tmp)
3250 hci_prio_recalculate(hdev, LE_LINK);
3253 static void hci_tx_work(struct work_struct *work)
3255 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3256 struct sk_buff *skb;
3258 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3259 hdev->sco_cnt, hdev->le_cnt);
3261 /* Schedule queues and send stuff to HCI driver */
3263 hci_sched_acl(hdev);
3265 hci_sched_sco(hdev);
3267 hci_sched_esco(hdev);
3269 hci_sched_le(hdev);
3271 /* Send next queued raw (unknown type) packet */
3272 while ((skb = skb_dequeue(&hdev->raw_q)))
3273 hci_send_frame(skb);
3276 /* ----- HCI RX task (incoming data processing) ----- */
3278 /* ACL data packet */
3279 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3281 struct hci_acl_hdr *hdr = (void *) skb->data;
3282 struct hci_conn *conn;
3283 __u16 handle, flags;
3285 skb_pull(skb, HCI_ACL_HDR_SIZE);
3287 handle = __le16_to_cpu(hdr->handle);
3288 flags = hci_flags(handle);
3289 handle = hci_handle(handle);
3291 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3292 handle, flags);
3294 hdev->stat.acl_rx++;
3296 hci_dev_lock(hdev);
3297 conn = hci_conn_hash_lookup_handle(hdev, handle);
3298 hci_dev_unlock(hdev);
3300 if (conn) {
3301 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3303 /* Send to upper protocol */
3304 l2cap_recv_acldata(conn, skb, flags);
3305 return;
3306 } else {
3307 BT_ERR("%s ACL packet for unknown connection handle %d",
3308 hdev->name, handle);
3311 kfree_skb(skb);
3314 /* SCO data packet */
3315 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3317 struct hci_sco_hdr *hdr = (void *) skb->data;
3318 struct hci_conn *conn;
3319 __u16 handle;
3321 skb_pull(skb, HCI_SCO_HDR_SIZE);
3323 handle = __le16_to_cpu(hdr->handle);
3325 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3327 hdev->stat.sco_rx++;
3329 hci_dev_lock(hdev);
3330 conn = hci_conn_hash_lookup_handle(hdev, handle);
3331 hci_dev_unlock(hdev);
3333 if (conn) {
3334 /* Send to upper protocol */
3335 sco_recv_scodata(conn, skb);
3336 return;
3337 } else {
3338 BT_ERR("%s SCO packet for unknown connection handle %d",
3339 hdev->name, handle);
3342 kfree_skb(skb);
3345 static bool hci_req_is_complete(struct hci_dev *hdev)
3347 struct sk_buff *skb;
3349 skb = skb_peek(&hdev->cmd_q);
3350 if (!skb)
3351 return true;
3353 return bt_cb(skb)->req.start;
3356 static void hci_resend_last(struct hci_dev *hdev)
3358 struct hci_command_hdr *sent;
3359 struct sk_buff *skb;
3360 u16 opcode;
3362 if (!hdev->sent_cmd)
3363 return;
3365 sent = (void *) hdev->sent_cmd->data;
3366 opcode = __le16_to_cpu(sent->opcode);
3367 if (opcode == HCI_OP_RESET)
3368 return;
3370 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3371 if (!skb)
3372 return;
3374 skb_queue_head(&hdev->cmd_q, skb);
3375 queue_work(hdev->workqueue, &hdev->cmd_work);
3378 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3380 hci_req_complete_t req_complete = NULL;
3381 struct sk_buff *skb;
3382 unsigned long flags;
3384 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3386 /* If the completed command doesn't match the last one that was
3387 * sent, we need to do special handling of it.
3389 if (!hci_sent_cmd_data(hdev, opcode)) {
3390 /* Some CSR based controllers generate a spontaneous
3391 * reset complete event during init and any pending
3392 * command will never be completed. In such a case we
3393 * need to resend whatever was the last sent
3394 * command.
3396 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3397 hci_resend_last(hdev);
3399 return;
3402 /* If the command succeeded and there are still more commands in
3403 * this request, the request is not yet complete.
3405 if (!status && !hci_req_is_complete(hdev))
3406 return;
3408 /* If this was the last command in a request the complete
3409 * callback would be found in hdev->sent_cmd instead of the
3410 * command queue (hdev->cmd_q).
3412 if (hdev->sent_cmd) {
3413 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3415 if (req_complete) {
3416 /* We must set the complete callback to NULL to
3417 * avoid calling the callback more than once if
3418 * this function gets called again.
3420 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3422 goto call_complete;
3426 /* Remove all pending commands belonging to this request */
3427 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3428 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3429 if (bt_cb(skb)->req.start) {
3430 __skb_queue_head(&hdev->cmd_q, skb);
3431 break;
3434 req_complete = bt_cb(skb)->req.complete;
3435 kfree_skb(skb);
3437 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3439 call_complete:
3440 if (req_complete)
3441 req_complete(hdev, status);
3444 static void hci_rx_work(struct work_struct *work)
3446 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3447 struct sk_buff *skb;
3449 BT_DBG("%s", hdev->name);
3451 while ((skb = skb_dequeue(&hdev->rx_q))) {
3452 /* Send copy to monitor */
3453 hci_send_to_monitor(hdev, skb);
3455 if (atomic_read(&hdev->promisc)) {
3456 /* Send copy to the sockets */
3457 hci_send_to_sock(hdev, skb);
3460 if (test_bit(HCI_RAW, &hdev->flags)) {
3461 kfree_skb(skb);
3462 continue;
3465 if (test_bit(HCI_INIT, &hdev->flags)) {
3466 /* Don't process data packets in this state. */
3467 switch (bt_cb(skb)->pkt_type) {
3468 case HCI_ACLDATA_PKT:
3469 case HCI_SCODATA_PKT:
3470 kfree_skb(skb);
3471 continue;
3475 /* Process frame */
3476 switch (bt_cb(skb)->pkt_type) {
3477 case HCI_EVENT_PKT:
3478 BT_DBG("%s Event packet", hdev->name);
3479 hci_event_packet(hdev, skb);
3480 break;
3482 case HCI_ACLDATA_PKT:
3483 BT_DBG("%s ACL data packet", hdev->name);
3484 hci_acldata_packet(hdev, skb);
3485 break;
3487 case HCI_SCODATA_PKT:
3488 BT_DBG("%s SCO data packet", hdev->name);
3489 hci_scodata_packet(hdev, skb);
3490 break;
3492 default:
3493 kfree_skb(skb);
3494 break;
3499 static void hci_cmd_work(struct work_struct *work)
3501 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3502 struct sk_buff *skb;
3504 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3505 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3507 /* Send queued commands */
3508 if (atomic_read(&hdev->cmd_cnt)) {
3509 skb = skb_dequeue(&hdev->cmd_q);
3510 if (!skb)
3511 return;
3513 kfree_skb(hdev->sent_cmd);
3515 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3516 if (hdev->sent_cmd) {
3517 atomic_dec(&hdev->cmd_cnt);
3518 hci_send_frame(skb);
3519 if (test_bit(HCI_RESET, &hdev->flags))
3520 del_timer(&hdev->cmd_timer);
3521 else
3522 mod_timer(&hdev->cmd_timer,
3523 jiffies + HCI_CMD_TIMEOUT);
3524 } else {
3525 skb_queue_head(&hdev->cmd_q, skb);
3526 queue_work(hdev->workqueue, &hdev->cmd_work);
3531 u8 bdaddr_to_le(u8 bdaddr_type)
3533 switch (bdaddr_type) {
3534 case BDADDR_LE_PUBLIC:
3535 return ADDR_LE_DEV_PUBLIC;
3537 default:
3538 /* Fallback to LE Random address type */
3539 return ADDR_LE_DEV_RANDOM;