/*
 * [extraction residue, preserved for provenance:
 *  "wl1251: only call ieee80211_beacon_loss in managed mode" (unrelated
 *  commit subject from the web viewer), path net/bluetooth/mgmt.c in
 *  linux-2.6/btrfs-unstable.git,
 *  blob 54abbce3a39e8bc0cdbd9018ca8e616e5c864546]
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
34 #include "smp.h"
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
41 MGMT_OP_READ_INFO,
42 MGMT_OP_SET_POWERED,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
46 MGMT_OP_SET_PAIRABLE,
47 MGMT_OP_SET_LINK_SECURITY,
48 MGMT_OP_SET_SSP,
49 MGMT_OP_SET_HS,
50 MGMT_OP_SET_LE,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
53 MGMT_OP_ADD_UUID,
54 MGMT_OP_REMOVE_UUID,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
57 MGMT_OP_DISCONNECT,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
62 MGMT_OP_PAIR_DEVICE,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
74 MGMT_OP_CONFIRM_NAME,
75 MGMT_OP_BLOCK_DEVICE,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
79 MGMT_OP_SET_BREDR,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
82 MGMT_OP_SET_SECURE_CONN,
83 MGMT_OP_SET_DEBUG_KEYS,
84 MGMT_OP_SET_PRIVACY,
85 MGMT_OP_LOAD_IRKS,
88 static const u16 mgmt_events[] = {
89 MGMT_EV_CONTROLLER_ERROR,
90 MGMT_EV_INDEX_ADDED,
91 MGMT_EV_INDEX_REMOVED,
92 MGMT_EV_NEW_SETTINGS,
93 MGMT_EV_CLASS_OF_DEV_CHANGED,
94 MGMT_EV_LOCAL_NAME_CHANGED,
95 MGMT_EV_NEW_LINK_KEY,
96 MGMT_EV_NEW_LONG_TERM_KEY,
97 MGMT_EV_DEVICE_CONNECTED,
98 MGMT_EV_DEVICE_DISCONNECTED,
99 MGMT_EV_CONNECT_FAILED,
100 MGMT_EV_PIN_CODE_REQUEST,
101 MGMT_EV_USER_CONFIRM_REQUEST,
102 MGMT_EV_USER_PASSKEY_REQUEST,
103 MGMT_EV_AUTH_FAILED,
104 MGMT_EV_DEVICE_FOUND,
105 MGMT_EV_DISCOVERING,
106 MGMT_EV_DEVICE_BLOCKED,
107 MGMT_EV_DEVICE_UNBLOCKED,
108 MGMT_EV_DEVICE_UNPAIRED,
109 MGMT_EV_PASSKEY_NOTIFY,
110 MGMT_EV_NEW_IRK,
111 MGMT_EV_NEW_CSRK,
114 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
116 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
117 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* Book-keeping entry for an mgmt command that is still awaiting its
 * Command Complete/Status reply. Linked on hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index the command targets */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* requesting socket (referenced) */
	void *user_data;	/* opcode-specific context */
};
128 /* HCI to MGMT error code conversion table */
129 static u8 mgmt_status_table[] = {
130 MGMT_STATUS_SUCCESS,
131 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
132 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
133 MGMT_STATUS_FAILED, /* Hardware Failure */
134 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
135 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
136 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
137 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
138 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
139 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
140 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
141 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
142 MGMT_STATUS_BUSY, /* Command Disallowed */
143 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
144 MGMT_STATUS_REJECTED, /* Rejected Security */
145 MGMT_STATUS_REJECTED, /* Rejected Personal */
146 MGMT_STATUS_TIMEOUT, /* Host Timeout */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
148 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
149 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
150 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
151 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
152 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
153 MGMT_STATUS_BUSY, /* Repeated Attempts */
154 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
155 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
156 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
157 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
158 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
159 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
160 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
161 MGMT_STATUS_FAILED, /* Unspecified Error */
162 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
163 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
164 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
165 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
166 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
167 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
168 MGMT_STATUS_FAILED, /* Unit Link Key Used */
169 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
170 MGMT_STATUS_TIMEOUT, /* Instant Passed */
171 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
172 MGMT_STATUS_FAILED, /* Transaction Collision */
173 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
174 MGMT_STATUS_REJECTED, /* QoS Rejected */
175 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
176 MGMT_STATUS_REJECTED, /* Insufficient Security */
177 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
178 MGMT_STATUS_BUSY, /* Role Switch Pending */
179 MGMT_STATUS_FAILED, /* Slot Violation */
180 MGMT_STATUS_FAILED, /* Role Switch Failed */
181 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
182 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
183 MGMT_STATUS_BUSY, /* Host Busy Pairing */
184 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
185 MGMT_STATUS_BUSY, /* Controller Busy */
186 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
187 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
188 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
189 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
190 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
193 static u8 mgmt_status(u8 hci_status)
195 if (hci_status < ARRAY_SIZE(mgmt_status_table))
196 return mgmt_status_table[hci_status];
198 return MGMT_STATUS_FAILED;
/* Send an MGMT_EV_CMD_STATUS event to @sk for command @cmd on @index.
 * Used to report a status (typically an error) without any response
 * parameters. Returns 0 on success or a negative errno.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* mgmt event header followed by the fixed-size status payload */
	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* On queueing failure the skb was not consumed; free it here */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
/* Send an MGMT_EV_CMD_COMPLETE event to @sk for command @cmd, with
 * @rp_len bytes of opcode-specific response parameters from @rp
 * (@rp may be NULL when @rp_len is 0). Returns 0 or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	/* Optional opcode-specific response parameters */
	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On queueing failure the skb was not consumed; free it here */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
265 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
266 u16 data_len)
268 struct mgmt_rp_read_version rp;
270 BT_DBG("sock %p", sk);
272 rp.version = MGMT_VERSION;
273 rp.revision = cpu_to_le16(MGMT_REVISION);
275 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
276 sizeof(rp));
279 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
280 u16 data_len)
282 struct mgmt_rp_read_commands *rp;
283 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
284 const u16 num_events = ARRAY_SIZE(mgmt_events);
285 __le16 *opcode;
286 size_t rp_size;
287 int i, err;
289 BT_DBG("sock %p", sk);
291 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
293 rp = kmalloc(rp_size, GFP_KERNEL);
294 if (!rp)
295 return -ENOMEM;
297 rp->num_commands = cpu_to_le16(num_commands);
298 rp->num_events = cpu_to_le16(num_events);
300 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
301 put_unaligned_le16(mgmt_commands[i], opcode);
303 for (i = 0; i < num_events; i++, opcode++)
304 put_unaligned_le16(mgmt_events[i], opcode);
306 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
307 rp_size);
308 kfree(rp);
310 return err;
/* Read Controller Index List command handler. Returns the ids of all
 * BR/EDR-type controllers that are visible to mgmt.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation. Devices still in
	 * setup or bound to a user channel are only filtered in the
	 * second pass, so this may over-allocate slightly.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev-list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indices that are actually reportable */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the final (possibly smaller) count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0,
			   rp, rp_len);

	kfree(rp);

	return err;
}
366 static u32 get_supported_settings(struct hci_dev *hdev)
368 u32 settings = 0;
370 settings |= MGMT_SETTING_POWERED;
371 settings |= MGMT_SETTING_PAIRABLE;
372 settings |= MGMT_SETTING_DEBUG_KEYS;
374 if (lmp_bredr_capable(hdev)) {
375 settings |= MGMT_SETTING_CONNECTABLE;
376 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
377 settings |= MGMT_SETTING_FAST_CONNECTABLE;
378 settings |= MGMT_SETTING_DISCOVERABLE;
379 settings |= MGMT_SETTING_BREDR;
380 settings |= MGMT_SETTING_LINK_SECURITY;
382 if (lmp_ssp_capable(hdev)) {
383 settings |= MGMT_SETTING_SSP;
384 settings |= MGMT_SETTING_HS;
387 if (lmp_sc_capable(hdev) ||
388 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
389 settings |= MGMT_SETTING_SECURE_CONN;
392 if (lmp_le_capable(hdev)) {
393 settings |= MGMT_SETTING_LE;
394 settings |= MGMT_SETTING_ADVERTISING;
395 settings |= MGMT_SETTING_PRIVACY;
398 return settings;
401 static u32 get_current_settings(struct hci_dev *hdev)
403 u32 settings = 0;
405 if (hdev_is_powered(hdev))
406 settings |= MGMT_SETTING_POWERED;
408 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_CONNECTABLE;
411 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
412 settings |= MGMT_SETTING_FAST_CONNECTABLE;
414 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
415 settings |= MGMT_SETTING_DISCOVERABLE;
417 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
418 settings |= MGMT_SETTING_PAIRABLE;
420 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_BREDR;
423 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
424 settings |= MGMT_SETTING_LE;
426 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
427 settings |= MGMT_SETTING_LINK_SECURITY;
429 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
430 settings |= MGMT_SETTING_SSP;
432 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
433 settings |= MGMT_SETTING_HS;
435 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
436 settings |= MGMT_SETTING_ADVERTISING;
438 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
439 settings |= MGMT_SETTING_SECURE_CONN;
441 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
442 settings |= MGMT_SETTING_DEBUG_KEYS;
444 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
445 settings |= MGMT_SETTING_PRIVACY;
447 return settings;
450 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (at most @len bytes available). Returns the new write position.
 * PnP Info and reserved (< 0x1100) UUIDs are excluded; if not all fit,
 * the field type is downgraded from "complete" to "some".
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte EIR header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit value sits in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Open the EIR field lazily on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		/* Length byte grows with each appended UUID */
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data (at most @len bytes available). Returns the new write position.
 * If not all fit, the field type is downgraded to "some".
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte EIR header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Open the EIR field lazily on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit value sits in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data (at most @len bytes available). Returns the new write position.
 * If not all fit, the field type is downgraded to "some".
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte EIR header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Open the EIR field lazily on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
560 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
562 struct pending_cmd *cmd;
564 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
565 if (cmd->opcode == opcode)
566 return cmd;
569 return NULL;
/* Build the LE scan response data in @ptr: currently just the local
 * name, truncated to fit HCI_MAX_AD_LENGTH. Returns the length used.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Room left after the 2-byte EIR header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncate and mark the name as shortened if needed */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length byte covers the type byte plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
/* Queue an LE Set Scan Response Data command if the generated data
 * differs from what is currently cached in hdev.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	/* Only relevant while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command when nothing actually changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
/* Determine the discoverability bits for the LE advertising Flags
 * field: general, limited, or none.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
/* Build the LE advertising data in @ptr: a Flags field (discoverability
 * and BR/EDR availability) and, when known, the TX power level.
 * Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the Flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
/* Queue an LE Set Advertising Data command if the generated data
 * differs from what is currently cached in hdev.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	/* Only relevant while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command when nothing actually changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
/* Build the Extended Inquiry Response payload in @data: local name,
 * inquiry TX power, device ID and the three UUID lists. The caller
 * (update_eir) passes a zeroed HCI_MAX_EIR_LENGTH buffer.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID field: source, vendor, product, version */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list helper consumes only the space that remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
/* Queue a Write Extended Inquiry Response command if the generated
 * EIR data differs from what is currently cached in hdev.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	/* EIR requires extended inquiry support and SSP enabled */
	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active the stored EIR is kept */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI command when nothing actually changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
781 static u8 get_service_classes(struct hci_dev *hdev)
783 struct bt_uuid *uuid;
784 u8 val = 0;
786 list_for_each_entry(uuid, &hdev->uuids, list)
787 val |= uuid->svc_hint;
789 return val;
/* Queue a Write Class of Device command if the computed class differs
 * from what is currently cached in hdev.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active the stored class is kept */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the second octet */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI command when nothing actually changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
821 static bool get_connectable(struct hci_dev *hdev)
823 struct pending_cmd *cmd;
825 /* If there's a pending mgmt command the flag will not yet have
826 * it's final value, so check for this first.
828 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
829 if (cmd) {
830 struct mgmt_mode *cp = cmd->param;
831 return cp->val;
834 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue the HCI commands to (re)enable LE advertising, with the
 * advertising type derived from the current connectable state.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
/* Queue an LE Set Advertise Enable command that turns advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
/* Delayed work: when the service cache period ends, push the real EIR
 * data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was actually set */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
/* Delayed work: the resolvable private address lifetime has expired.
 * Mark the RPA stale and, if currently advertising with no LE
 * connections, restart advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Skip the restart while not advertising or while LE
	 * connections exist.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
/* One-time per-device initialisation performed when the controller is
 * first touched through the mgmt interface.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* HCI_MGMT marks the device as mgmt-controlled; only init once */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
/* Read Controller Information command handler: reply with address,
 * version, manufacturer, class, settings and names of @hdev.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
/* Free a pending command entry, dropping the socket reference taken
 * in mgmt_pending_add(). The entry must already be unlinked.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
/* Allocate and register a pending mgmt command on @hdev, copying @len
 * bytes of @data as the parameters and taking a reference on @sk
 * (both released by mgmt_pending_free()).
 * Note: when @data is NULL the param buffer is left uninitialized.
 * Returns the new entry, or NULL on allocation failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
/* Invoke @cb on every pending command on @hdev, or only on those
 * matching @opcode when @opcode is non-zero. Iteration is safe
 * against @cb removing the current entry.
 */
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}
/* Unlink a pending command from its list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1030 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1032 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1034 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1035 sizeof(settings));
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the deferred power-off work immediately instead of
 * waiting for its timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
/* Queue the HCI commands needed to quiesce the controller before a
 * power-off: disable page/inquiry scan, stop advertising and LE
 * scanning, and tear down or cancel every connection.
 * Returns the hci_req_run() result (-ENODATA if nothing was queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	/* Disable page and inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links get disconnected */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts get cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests get rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
/* Set Powered command handler: power the controller up or down.
 * Replies immediately when the state already matches; otherwise a
 * pending command is registered and the reply is sent once the power
 * change completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powering on while the auto-off timer is pending: the device
	 * is already up, so cancel the timer and report powered.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* No state change: answer with the current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1169 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1170 struct sock *skip_sk)
1172 struct sk_buff *skb;
1173 struct mgmt_hdr *hdr;
1175 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1176 if (!skb)
1177 return -ENOMEM;
1179 hdr = (void *) skb_put(skb, sizeof(*hdr));
1180 hdr->opcode = cpu_to_le16(event);
1181 if (hdev)
1182 hdr->index = cpu_to_le16(hdev->id);
1183 else
1184 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1185 hdr->len = cpu_to_le16(data_len);
1187 if (data)
1188 memcpy(skb_put(skb, data_len), data, data_len);
1190 /* Time stamp */
1191 __net_timestamp(skb);
1193 hci_send_to_control(skb, skip_sk);
1194 kfree_skb(skb);
1196 return 0;
1199 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1201 __le32 ev;
1203 ev = cpu_to_le32(get_current_settings(hdev));
1205 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Shared context passed through mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (referenced) */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report, when applicable */
};
/* mgmt_pending_foreach() callback: reply to a pending settings command
 * and free it, keeping a reference to the first responder's socket in
 * the cmd_lookup context for the caller.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* Safe: the entry was unlinked above */
	mgmt_pending_free(cmd);
}
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status passed via @data and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1238 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1240 if (!lmp_bredr_capable(hdev))
1241 return MGMT_STATUS_NOT_SUPPORTED;
1242 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1243 return MGMT_STATUS_REJECTED;
1244 else
1245 return MGMT_STATUS_SUCCESS;
1248 static u8 mgmt_le_support(struct hci_dev *hdev)
1250 if (!lmp_le_capable(hdev))
1251 return MGMT_STATUS_NOT_SUPPORTED;
1252 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1253 return MGMT_STATUS_REJECTED;
1254 else
1255 return MGMT_STATUS_SUCCESS;
/* hci_request completion callback for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, reply to the
 * pending command and emit New Settings if the state changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the limited-discoverable flag on failure */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* A non-zero timeout makes discoverable mode expire */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1315 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1316 u16 len)
1318 struct mgmt_cp_set_discoverable *cp = data;
1319 struct pending_cmd *cmd;
1320 struct hci_request req;
1321 u16 timeout;
1322 u8 scan;
1323 int err;
1325 BT_DBG("request for %s", hdev->name);
1327 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1328 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1329 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1330 MGMT_STATUS_REJECTED);
1332 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1333 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1334 MGMT_STATUS_INVALID_PARAMS);
1336 timeout = __le16_to_cpu(cp->timeout);
1338 /* Disabling discoverable requires that no timeout is set,
1339 * and enabling limited discoverable requires a timeout.
1341 if ((cp->val == 0x00 && timeout > 0) ||
1342 (cp->val == 0x02 && timeout == 0))
1343 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1344 MGMT_STATUS_INVALID_PARAMS);
1346 hci_dev_lock(hdev);
1348 if (!hdev_is_powered(hdev) && timeout > 0) {
1349 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1350 MGMT_STATUS_NOT_POWERED);
1351 goto failed;
1354 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1355 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1356 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1357 MGMT_STATUS_BUSY);
1358 goto failed;
1361 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1362 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1363 MGMT_STATUS_REJECTED);
1364 goto failed;
1367 if (!hdev_is_powered(hdev)) {
1368 bool changed = false;
1370 /* Setting limited discoverable when powered off is
1371 * not a valid operation since it requires a timeout
1372 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1374 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1375 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1376 changed = true;
1379 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1380 if (err < 0)
1381 goto failed;
1383 if (changed)
1384 err = new_settings(hdev, sk);
1386 goto failed;
1389 /* If the current mode is the same, then just update the timeout
1390 * value with the new value. And if only the timeout gets updated,
1391 * then no need for any HCI transactions.
1393 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1394 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1395 &hdev->dev_flags)) {
1396 cancel_delayed_work(&hdev->discov_off);
1397 hdev->discov_timeout = timeout;
1399 if (cp->val && hdev->discov_timeout > 0) {
1400 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1401 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1402 to);
1405 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1406 goto failed;
1409 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1410 if (!cmd) {
1411 err = -ENOMEM;
1412 goto failed;
1415 /* Cancel any potential discoverable timeout that might be
1416 * still active and store new timeout value. The arming of
1417 * the timeout happens in the complete handler.
1419 cancel_delayed_work(&hdev->discov_off);
1420 hdev->discov_timeout = timeout;
1422 /* Limited discoverable mode */
1423 if (cp->val == 0x02)
1424 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1425 else
1426 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1428 hci_req_init(&req, hdev);
1430 /* The procedure for LE-only controllers is much simpler - just
1431 * update the advertising data.
1433 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1434 goto update_ad;
1436 scan = SCAN_PAGE;
1438 if (cp->val) {
1439 struct hci_cp_write_current_iac_lap hci_cp;
1441 if (cp->val == 0x02) {
1442 /* Limited discoverable mode */
1443 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1444 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1445 hci_cp.iac_lap[1] = 0x8b;
1446 hci_cp.iac_lap[2] = 0x9e;
1447 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1448 hci_cp.iac_lap[4] = 0x8b;
1449 hci_cp.iac_lap[5] = 0x9e;
1450 } else {
1451 /* General discoverable mode */
1452 hci_cp.num_iac = 1;
1453 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1454 hci_cp.iac_lap[1] = 0x8b;
1455 hci_cp.iac_lap[2] = 0x9e;
1458 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1459 (hci_cp.num_iac * 3) + 1, &hci_cp);
1461 scan |= SCAN_INQUIRY;
1462 } else {
1463 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1466 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1468 update_ad:
1469 update_adv_data(&req);
1471 err = hci_req_run(&req, set_discoverable_complete);
1472 if (err < 0)
1473 mgmt_pending_remove(cmd);
1475 failed:
1476 hci_dev_unlock(hdev);
1477 return err;
1480 static void write_fast_connectable(struct hci_request *req, bool enable)
1482 struct hci_dev *hdev = req->hdev;
1483 struct hci_cp_write_page_scan_activity acp;
1484 u8 type;
1486 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1487 return;
1489 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1490 return;
1492 if (enable) {
1493 type = PAGE_SCAN_TYPE_INTERLACED;
1495 /* 160 msec page scan interval */
1496 acp.interval = cpu_to_le16(0x0100);
1497 } else {
1498 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1500 /* default 1.28 sec page scan */
1501 acp.interval = cpu_to_le16(0x0800);
1504 acp.window = cpu_to_le16(0x0012);
1506 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1507 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1508 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1509 sizeof(acp), &acp);
1511 if (hdev->page_scan_type != type)
1512 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1515 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1517 struct pending_cmd *cmd;
1518 struct mgmt_mode *cp;
1519 bool changed;
1521 BT_DBG("status 0x%02x", status);
1523 hci_dev_lock(hdev);
1525 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1526 if (!cmd)
1527 goto unlock;
1529 if (status) {
1530 u8 mgmt_err = mgmt_status(status);
1531 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1532 goto remove_cmd;
1535 cp = cmd->param;
1536 if (cp->val)
1537 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1538 else
1539 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1541 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1543 if (changed)
1544 new_settings(hdev, cmd->sk);
1546 remove_cmd:
1547 mgmt_pending_remove(cmd);
1549 unlock:
1550 hci_dev_unlock(hdev);
1553 static int set_connectable_update_settings(struct hci_dev *hdev,
1554 struct sock *sk, u8 val)
1556 bool changed = false;
1557 int err;
1559 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1560 changed = true;
1562 if (val) {
1563 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1564 } else {
1565 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1566 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1569 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1570 if (err < 0)
1571 return err;
1573 if (changed)
1574 return new_settings(hdev, sk);
1576 return 0;
1579 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1580 u16 len)
1582 struct mgmt_mode *cp = data;
1583 struct pending_cmd *cmd;
1584 struct hci_request req;
1585 u8 scan;
1586 int err;
1588 BT_DBG("request for %s", hdev->name);
1590 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1591 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1592 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1593 MGMT_STATUS_REJECTED);
1595 if (cp->val != 0x00 && cp->val != 0x01)
1596 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1597 MGMT_STATUS_INVALID_PARAMS);
1599 hci_dev_lock(hdev);
1601 if (!hdev_is_powered(hdev)) {
1602 err = set_connectable_update_settings(hdev, sk, cp->val);
1603 goto failed;
1606 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1607 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1608 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1609 MGMT_STATUS_BUSY);
1610 goto failed;
1613 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1614 if (!cmd) {
1615 err = -ENOMEM;
1616 goto failed;
1619 hci_req_init(&req, hdev);
1621 /* If BR/EDR is not enabled and we disable advertising as a
1622 * by-product of disabling connectable, we need to update the
1623 * advertising flags.
1625 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1626 if (!cp->val) {
1627 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1628 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1630 update_adv_data(&req);
1631 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1632 if (cp->val) {
1633 scan = SCAN_PAGE;
1634 } else {
1635 scan = 0;
1637 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1638 hdev->discov_timeout > 0)
1639 cancel_delayed_work(&hdev->discov_off);
1642 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1645 /* If we're going from non-connectable to connectable or
1646 * vice-versa when fast connectable is enabled ensure that fast
1647 * connectable gets disabled. write_fast_connectable won't do
1648 * anything if the page scan parameters are already what they
1649 * should be.
1651 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1652 write_fast_connectable(&req, false);
1654 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1655 hci_conn_num(hdev, LE_LINK) == 0) {
1656 disable_advertising(&req);
1657 enable_advertising(&req);
1660 err = hci_req_run(&req, set_connectable_complete);
1661 if (err < 0) {
1662 mgmt_pending_remove(cmd);
1663 if (err == -ENODATA)
1664 err = set_connectable_update_settings(hdev, sk,
1665 cp->val);
1666 goto failed;
1669 failed:
1670 hci_dev_unlock(hdev);
1671 return err;
1674 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1675 u16 len)
1677 struct mgmt_mode *cp = data;
1678 bool changed;
1679 int err;
1681 BT_DBG("request for %s", hdev->name);
1683 if (cp->val != 0x00 && cp->val != 0x01)
1684 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1685 MGMT_STATUS_INVALID_PARAMS);
1687 hci_dev_lock(hdev);
1689 if (cp->val)
1690 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1691 else
1692 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1694 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1695 if (err < 0)
1696 goto unlock;
1698 if (changed)
1699 err = new_settings(hdev, sk);
1701 unlock:
1702 hci_dev_unlock(hdev);
1703 return err;
1706 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1707 u16 len)
1709 struct mgmt_mode *cp = data;
1710 struct pending_cmd *cmd;
1711 u8 val, status;
1712 int err;
1714 BT_DBG("request for %s", hdev->name);
1716 status = mgmt_bredr_support(hdev);
1717 if (status)
1718 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1719 status);
1721 if (cp->val != 0x00 && cp->val != 0x01)
1722 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1723 MGMT_STATUS_INVALID_PARAMS);
1725 hci_dev_lock(hdev);
1727 if (!hdev_is_powered(hdev)) {
1728 bool changed = false;
1730 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1731 &hdev->dev_flags)) {
1732 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1733 changed = true;
1736 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1737 if (err < 0)
1738 goto failed;
1740 if (changed)
1741 err = new_settings(hdev, sk);
1743 goto failed;
1746 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1747 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1748 MGMT_STATUS_BUSY);
1749 goto failed;
1752 val = !!cp->val;
1754 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1755 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1756 goto failed;
1759 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1760 if (!cmd) {
1761 err = -ENOMEM;
1762 goto failed;
1765 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1766 if (err < 0) {
1767 mgmt_pending_remove(cmd);
1768 goto failed;
1771 failed:
1772 hci_dev_unlock(hdev);
1773 return err;
1776 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1778 struct mgmt_mode *cp = data;
1779 struct pending_cmd *cmd;
1780 u8 status;
1781 int err;
1783 BT_DBG("request for %s", hdev->name);
1785 status = mgmt_bredr_support(hdev);
1786 if (status)
1787 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1789 if (!lmp_ssp_capable(hdev))
1790 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1791 MGMT_STATUS_NOT_SUPPORTED);
1793 if (cp->val != 0x00 && cp->val != 0x01)
1794 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1795 MGMT_STATUS_INVALID_PARAMS);
1797 hci_dev_lock(hdev);
1799 if (!hdev_is_powered(hdev)) {
1800 bool changed;
1802 if (cp->val) {
1803 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1804 &hdev->dev_flags);
1805 } else {
1806 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1807 &hdev->dev_flags);
1808 if (!changed)
1809 changed = test_and_clear_bit(HCI_HS_ENABLED,
1810 &hdev->dev_flags);
1811 else
1812 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1815 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1816 if (err < 0)
1817 goto failed;
1819 if (changed)
1820 err = new_settings(hdev, sk);
1822 goto failed;
1825 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1826 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1827 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1828 MGMT_STATUS_BUSY);
1829 goto failed;
1832 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1833 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1834 goto failed;
1837 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1838 if (!cmd) {
1839 err = -ENOMEM;
1840 goto failed;
1843 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1844 if (err < 0) {
1845 mgmt_pending_remove(cmd);
1846 goto failed;
1849 failed:
1850 hci_dev_unlock(hdev);
1851 return err;
1854 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1856 struct mgmt_mode *cp = data;
1857 bool changed;
1858 u8 status;
1859 int err;
1861 BT_DBG("request for %s", hdev->name);
1863 status = mgmt_bredr_support(hdev);
1864 if (status)
1865 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1867 if (!lmp_ssp_capable(hdev))
1868 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1869 MGMT_STATUS_NOT_SUPPORTED);
1871 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1872 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1873 MGMT_STATUS_REJECTED);
1875 if (cp->val != 0x00 && cp->val != 0x01)
1876 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1877 MGMT_STATUS_INVALID_PARAMS);
1879 hci_dev_lock(hdev);
1881 if (cp->val) {
1882 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1883 } else {
1884 if (hdev_is_powered(hdev)) {
1885 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1886 MGMT_STATUS_REJECTED);
1887 goto unlock;
1890 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1893 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1894 if (err < 0)
1895 goto unlock;
1897 if (changed)
1898 err = new_settings(hdev, sk);
1900 unlock:
1901 hci_dev_unlock(hdev);
1902 return err;
1905 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1907 struct cmd_lookup match = { NULL, hdev };
1909 if (status) {
1910 u8 mgmt_err = mgmt_status(status);
1912 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1913 &mgmt_err);
1914 return;
1917 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1919 new_settings(hdev, match.sk);
1921 if (match.sk)
1922 sock_put(match.sk);
1924 /* Make sure the controller has a good default for
1925 * advertising data. Restrict the update to when LE
1926 * has actually been enabled. During power on, the
1927 * update in powered_update_hci will take care of it.
1929 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1930 struct hci_request req;
1932 hci_dev_lock(hdev);
1934 hci_req_init(&req, hdev);
1935 update_adv_data(&req);
1936 update_scan_rsp_data(&req);
1937 hci_req_run(&req, NULL);
1939 hci_dev_unlock(hdev);
1943 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1945 struct mgmt_mode *cp = data;
1946 struct hci_cp_write_le_host_supported hci_cp;
1947 struct pending_cmd *cmd;
1948 struct hci_request req;
1949 int err;
1950 u8 val, enabled;
1952 BT_DBG("request for %s", hdev->name);
1954 if (!lmp_le_capable(hdev))
1955 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1956 MGMT_STATUS_NOT_SUPPORTED);
1958 if (cp->val != 0x00 && cp->val != 0x01)
1959 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1960 MGMT_STATUS_INVALID_PARAMS);
1962 /* LE-only devices do not allow toggling LE on/off */
1963 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1964 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1967 hci_dev_lock(hdev);
1969 val = !!cp->val;
1970 enabled = lmp_host_le_capable(hdev);
1972 if (!hdev_is_powered(hdev) || val == enabled) {
1973 bool changed = false;
1975 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1976 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1977 changed = true;
1980 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1981 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1982 changed = true;
1985 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1986 if (err < 0)
1987 goto unlock;
1989 if (changed)
1990 err = new_settings(hdev, sk);
1992 goto unlock;
1995 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1996 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1997 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1998 MGMT_STATUS_BUSY);
1999 goto unlock;
2002 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2003 if (!cmd) {
2004 err = -ENOMEM;
2005 goto unlock;
2008 hci_req_init(&req, hdev);
2010 memset(&hci_cp, 0, sizeof(hci_cp));
2012 if (val) {
2013 hci_cp.le = val;
2014 hci_cp.simul = lmp_le_br_capable(hdev);
2015 } else {
2016 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2017 disable_advertising(&req);
2020 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2021 &hci_cp);
2023 err = hci_req_run(&req, le_enable_complete);
2024 if (err < 0)
2025 mgmt_pending_remove(cmd);
2027 unlock:
2028 hci_dev_unlock(hdev);
2029 return err;
2032 /* This is a helper function to test for pending mgmt commands that can
2033 * cause CoD or EIR HCI commands. We can only allow one such pending
2034 * mgmt command at a time since otherwise we cannot easily track what
2035 * the current values are, will be, and based on that calculate if a new
2036 * HCI command needs to be sent and if yes with what value.
2038 static bool pending_eir_or_class(struct hci_dev *hdev)
2040 struct pending_cmd *cmd;
2042 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2043 switch (cmd->opcode) {
2044 case MGMT_OP_ADD_UUID:
2045 case MGMT_OP_REMOVE_UUID:
2046 case MGMT_OP_SET_DEV_CLASS:
2047 case MGMT_OP_SET_POWERED:
2048 return true;
2052 return false;
2055 static const u8 bluetooth_base_uuid[] = {
2056 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2057 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2060 static u8 get_uuid_size(const u8 *uuid)
2062 u32 val;
2064 if (memcmp(uuid, bluetooth_base_uuid, 12))
2065 return 128;
2067 val = get_unaligned_le32(&uuid[12]);
2068 if (val > 0xffff)
2069 return 32;
2071 return 16;
2074 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2076 struct pending_cmd *cmd;
2078 hci_dev_lock(hdev);
2080 cmd = mgmt_pending_find(mgmt_op, hdev);
2081 if (!cmd)
2082 goto unlock;
2084 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2085 hdev->dev_class, 3);
2087 mgmt_pending_remove(cmd);
2089 unlock:
2090 hci_dev_unlock(hdev);
2093 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2095 BT_DBG("status 0x%02x", status);
2097 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2100 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2102 struct mgmt_cp_add_uuid *cp = data;
2103 struct pending_cmd *cmd;
2104 struct hci_request req;
2105 struct bt_uuid *uuid;
2106 int err;
2108 BT_DBG("request for %s", hdev->name);
2110 hci_dev_lock(hdev);
2112 if (pending_eir_or_class(hdev)) {
2113 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2114 MGMT_STATUS_BUSY);
2115 goto failed;
2118 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2119 if (!uuid) {
2120 err = -ENOMEM;
2121 goto failed;
2124 memcpy(uuid->uuid, cp->uuid, 16);
2125 uuid->svc_hint = cp->svc_hint;
2126 uuid->size = get_uuid_size(cp->uuid);
2128 list_add_tail(&uuid->list, &hdev->uuids);
2130 hci_req_init(&req, hdev);
2132 update_class(&req);
2133 update_eir(&req);
2135 err = hci_req_run(&req, add_uuid_complete);
2136 if (err < 0) {
2137 if (err != -ENODATA)
2138 goto failed;
2140 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2141 hdev->dev_class, 3);
2142 goto failed;
2145 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2146 if (!cmd) {
2147 err = -ENOMEM;
2148 goto failed;
2151 err = 0;
2153 failed:
2154 hci_dev_unlock(hdev);
2155 return err;
2158 static bool enable_service_cache(struct hci_dev *hdev)
2160 if (!hdev_is_powered(hdev))
2161 return false;
2163 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2164 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2165 CACHE_TIMEOUT);
2166 return true;
2169 return false;
2172 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2174 BT_DBG("status 0x%02x", status);
2176 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2179 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2180 u16 len)
2182 struct mgmt_cp_remove_uuid *cp = data;
2183 struct pending_cmd *cmd;
2184 struct bt_uuid *match, *tmp;
2185 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2186 struct hci_request req;
2187 int err, found;
2189 BT_DBG("request for %s", hdev->name);
2191 hci_dev_lock(hdev);
2193 if (pending_eir_or_class(hdev)) {
2194 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2195 MGMT_STATUS_BUSY);
2196 goto unlock;
2199 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2200 hci_uuids_clear(hdev);
2202 if (enable_service_cache(hdev)) {
2203 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2204 0, hdev->dev_class, 3);
2205 goto unlock;
2208 goto update_class;
2211 found = 0;
2213 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2214 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2215 continue;
2217 list_del(&match->list);
2218 kfree(match);
2219 found++;
2222 if (found == 0) {
2223 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2224 MGMT_STATUS_INVALID_PARAMS);
2225 goto unlock;
2228 update_class:
2229 hci_req_init(&req, hdev);
2231 update_class(&req);
2232 update_eir(&req);
2234 err = hci_req_run(&req, remove_uuid_complete);
2235 if (err < 0) {
2236 if (err != -ENODATA)
2237 goto unlock;
2239 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2240 hdev->dev_class, 3);
2241 goto unlock;
2244 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2245 if (!cmd) {
2246 err = -ENOMEM;
2247 goto unlock;
2250 err = 0;
2252 unlock:
2253 hci_dev_unlock(hdev);
2254 return err;
2257 static void set_class_complete(struct hci_dev *hdev, u8 status)
2259 BT_DBG("status 0x%02x", status);
2261 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2264 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2265 u16 len)
2267 struct mgmt_cp_set_dev_class *cp = data;
2268 struct pending_cmd *cmd;
2269 struct hci_request req;
2270 int err;
2272 BT_DBG("request for %s", hdev->name);
2274 if (!lmp_bredr_capable(hdev))
2275 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2276 MGMT_STATUS_NOT_SUPPORTED);
2278 hci_dev_lock(hdev);
2280 if (pending_eir_or_class(hdev)) {
2281 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2282 MGMT_STATUS_BUSY);
2283 goto unlock;
2286 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2287 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2288 MGMT_STATUS_INVALID_PARAMS);
2289 goto unlock;
2292 hdev->major_class = cp->major;
2293 hdev->minor_class = cp->minor;
2295 if (!hdev_is_powered(hdev)) {
2296 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2297 hdev->dev_class, 3);
2298 goto unlock;
2301 hci_req_init(&req, hdev);
2303 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2304 hci_dev_unlock(hdev);
2305 cancel_delayed_work_sync(&hdev->service_cache);
2306 hci_dev_lock(hdev);
2307 update_eir(&req);
2310 update_class(&req);
2312 err = hci_req_run(&req, set_class_complete);
2313 if (err < 0) {
2314 if (err != -ENODATA)
2315 goto unlock;
2317 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2318 hdev->dev_class, 3);
2319 goto unlock;
2322 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2323 if (!cmd) {
2324 err = -ENOMEM;
2325 goto unlock;
2328 err = 0;
2330 unlock:
2331 hci_dev_unlock(hdev);
2332 return err;
2335 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2336 u16 len)
2338 struct mgmt_cp_load_link_keys *cp = data;
2339 u16 key_count, expected_len;
2340 bool changed;
2341 int i;
2343 BT_DBG("request for %s", hdev->name);
2345 if (!lmp_bredr_capable(hdev))
2346 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2347 MGMT_STATUS_NOT_SUPPORTED);
2349 key_count = __le16_to_cpu(cp->key_count);
2351 expected_len = sizeof(*cp) + key_count *
2352 sizeof(struct mgmt_link_key_info);
2353 if (expected_len != len) {
2354 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2355 expected_len, len);
2356 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_INVALID_PARAMS);
2360 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2361 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2362 MGMT_STATUS_INVALID_PARAMS);
2364 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2365 key_count);
2367 for (i = 0; i < key_count; i++) {
2368 struct mgmt_link_key_info *key = &cp->keys[i];
2370 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2371 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 hci_dev_lock(hdev);
2377 hci_link_keys_clear(hdev);
2379 if (cp->debug_keys)
2380 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2381 else
2382 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2384 if (changed)
2385 new_settings(hdev, NULL);
2387 for (i = 0; i < key_count; i++) {
2388 struct mgmt_link_key_info *key = &cp->keys[i];
2390 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2391 key->type, key->pin_len);
2394 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2396 hci_dev_unlock(hdev);
2398 return 0;
2401 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402 u8 addr_type, struct sock *skip_sk)
2404 struct mgmt_ev_device_unpaired ev;
2406 bacpy(&ev.addr.bdaddr, bdaddr);
2407 ev.addr.type = addr_type;
2409 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2410 skip_sk);
2413 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2414 u16 len)
2416 struct mgmt_cp_unpair_device *cp = data;
2417 struct mgmt_rp_unpair_device rp;
2418 struct hci_cp_disconnect dc;
2419 struct pending_cmd *cmd;
2420 struct hci_conn *conn;
2421 int err;
2423 memset(&rp, 0, sizeof(rp));
2424 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2425 rp.addr.type = cp->addr.type;
2427 if (!bdaddr_type_is_valid(cp->addr.type))
2428 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2429 MGMT_STATUS_INVALID_PARAMS,
2430 &rp, sizeof(rp));
2432 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2433 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2434 MGMT_STATUS_INVALID_PARAMS,
2435 &rp, sizeof(rp));
2437 hci_dev_lock(hdev);
2439 if (!hdev_is_powered(hdev)) {
2440 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2441 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2442 goto unlock;
2445 if (cp->addr.type == BDADDR_BREDR) {
2446 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2447 } else {
2448 u8 addr_type;
2450 if (cp->addr.type == BDADDR_LE_PUBLIC)
2451 addr_type = ADDR_LE_DEV_PUBLIC;
2452 else
2453 addr_type = ADDR_LE_DEV_RANDOM;
2455 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2457 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2459 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2462 if (err < 0) {
2463 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2464 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2465 goto unlock;
2468 if (cp->disconnect) {
2469 if (cp->addr.type == BDADDR_BREDR)
2470 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2471 &cp->addr.bdaddr);
2472 else
2473 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2474 &cp->addr.bdaddr);
2475 } else {
2476 conn = NULL;
2479 if (!conn) {
2480 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2481 &rp, sizeof(rp));
2482 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2483 goto unlock;
2486 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2487 sizeof(*cp));
2488 if (!cmd) {
2489 err = -ENOMEM;
2490 goto unlock;
2493 dc.handle = cpu_to_le16(conn->handle);
2494 dc.reason = 0x13; /* Remote User Terminated Connection */
2495 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2496 if (err < 0)
2497 mgmt_pending_remove(cmd);
2499 unlock:
2500 hci_dev_unlock(hdev);
2501 return err;
/* Handle MGMT_OP_DISCONNECT: tear down an existing BR/EDR or LE connection.
 * Sends HCI Disconnect and tracks the command as pending until the
 * disconnect completes; the response echoes the target address back.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Echo the address in the reply so user space can match it */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending per controller at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not usable for disconnect */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2569 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2571 switch (link_type) {
2572 case LE_LINK:
2573 switch (addr_type) {
2574 case ADDR_LE_DEV_PUBLIC:
2575 return BDADDR_LE_PUBLIC;
2577 default:
2578 /* Fallback to LE Random address type */
2579 return BDADDR_LE_RANDOM;
2582 default:
2583 /* Fallback to BR/EDR type */
2584 return BDADDR_BREDR;
/* Handle MGMT_OP_GET_CONNECTIONS: return the addresses of all connections
 * that mgmt considers connected (HCI_CONN_MGMT_CONNECTED), excluding
 * SCO/eSCO links from the reported list.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidates to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill entries. A SCO/eSCO entry is written but then
	 * skipped (i not incremented), so it is overwritten or dropped by
	 * the final count.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2646 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2647 struct mgmt_cp_pin_code_neg_reply *cp)
2649 struct pending_cmd *cmd;
2650 int err;
2652 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2653 sizeof(*cp));
2654 if (!cmd)
2655 return -ENOMEM;
2657 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2658 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2659 if (err < 0)
2660 mgmt_pending_remove(cmd);
2662 return err;
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller for an existing ACL connection. A high-security connection
 * requires a full 16-byte PIN; otherwise a negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security pairing demands a 16 digit PIN; reject shorter
	 * ones by turning the reply into a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2725 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2726 u16 len)
2728 struct mgmt_cp_set_io_capability *cp = data;
2730 BT_DBG("");
2732 hci_dev_lock(hdev);
2734 hdev->io_capability = cp->io_capability;
2736 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2737 hdev->io_capability);
2739 hci_dev_unlock(hdev);
2741 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2745 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2747 struct hci_dev *hdev = conn->hdev;
2748 struct pending_cmd *cmd;
2750 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2751 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2752 continue;
2754 if (cmd->user_data != conn)
2755 continue;
2757 return cmd;
2760 return NULL;
/* Finish a pending pair-device command: send the mgmt response, detach
 * the pairing callbacks from the connection, drop our connection
 * reference and remove the pending command entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2784 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2786 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2787 struct pending_cmd *cmd;
2789 cmd = find_pairing(conn);
2790 if (cmd)
2791 pairing_complete(cmd, status);
2794 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2796 struct pending_cmd *cmd;
2798 BT_DBG("status %u", status);
2800 cmd = find_pairing(conn);
2801 if (!cmd)
2802 BT_DBG("Unable to find a pending command");
2803 else
2804 pairing_complete(cmd, mgmt_status(status));
2807 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2809 struct pending_cmd *cmd;
2811 BT_DBG("status %u", status);
2813 if (!status)
2814 return;
2816 cmd = find_pairing(conn);
2817 if (!cmd)
2818 BT_DBG("Unable to find a pending command");
2819 else
2820 pairing_complete(cmd, mgmt_status(status));
/* Handle MGMT_OP_PAIR_DEVICE: initiate a connection (BR/EDR ACL or LE)
 * with dedicated-bonding authentication, attach pairing callbacks and
 * track the command until pairing completes.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connect callback already set means another pairing is in
	 * progress on this connection; give back our reference.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the currently pending
 * pair-device command if its target matches the supplied address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Nothing to cancel if no pair-device command is pending */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must name the same device the pairing targets */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Common handler for all user confirmation/passkey (neg) replies.
 * LE addresses are routed to SMP; BR/EDR replies are forwarded to the
 * controller via the given HCI opcode and tracked as pending.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3037 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3038 void *data, u16 len)
3040 struct mgmt_cp_pin_code_neg_reply *cp = data;
3042 BT_DBG("");
3044 return user_pairing_resp(sk, hdev, &cp->addr,
3045 MGMT_OP_PIN_CODE_NEG_REPLY,
3046 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3049 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3050 u16 len)
3052 struct mgmt_cp_user_confirm_reply *cp = data;
3054 BT_DBG("");
3056 if (len != sizeof(*cp))
3057 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3058 MGMT_STATUS_INVALID_PARAMS);
3060 return user_pairing_resp(sk, hdev, &cp->addr,
3061 MGMT_OP_USER_CONFIRM_REPLY,
3062 HCI_OP_USER_CONFIRM_REPLY, 0);
3065 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3066 void *data, u16 len)
3068 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3070 BT_DBG("");
3072 return user_pairing_resp(sk, hdev, &cp->addr,
3073 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3074 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3077 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3078 u16 len)
3080 struct mgmt_cp_user_passkey_reply *cp = data;
3082 BT_DBG("");
3084 return user_pairing_resp(sk, hdev, &cp->addr,
3085 MGMT_OP_USER_PASSKEY_REPLY,
3086 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3089 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3090 void *data, u16 len)
3092 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3094 BT_DBG("");
3096 return user_pairing_resp(sk, hdev, &cp->addr,
3097 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3098 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3101 static void update_name(struct hci_request *req)
3103 struct hci_dev *hdev = req->hdev;
3104 struct hci_cp_write_local_name cp;
3106 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3108 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for set_local_name: report success or
 * failure back on the pending MGMT_OP_SET_LOCAL_NAME command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short name.
 * When powered, the change is pushed to the controller (local name, EIR
 * and scan response data); when not powered it is stored and the name
 * change event is emitted immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for local OOB
 * pairing data. Uses the extended variant when Secure Connections is
 * enabled. Requires powered, SSP-capable hardware and no duplicate
 * request in flight.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Secure Connections controllers provide both P-192 and P-256
	 * data via the extended command.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.
 * Accepts either the legacy (hash/randomizer) or the extended
 * (192+256 bit) payload, distinguished purely by length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3304 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3305 void *data, u16 len)
3307 struct mgmt_cp_remove_remote_oob_data *cp = data;
3308 u8 status;
3309 int err;
3311 BT_DBG("%s", hdev->name);
3313 hci_dev_lock(hdev);
3315 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3316 if (err < 0)
3317 status = MGMT_STATUS_INVALID_PARAMS;
3318 else
3319 status = MGMT_STATUS_SUCCESS;
3321 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3322 status, &cp->addr, sizeof(cp->addr));
3324 hci_dev_unlock(hdev);
3325 return err;
3328 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3330 struct pending_cmd *cmd;
3331 u8 type;
3332 int err;
3334 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3336 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3337 if (!cmd)
3338 return -ENOENT;
3340 type = hdev->discovery.type;
3342 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3343 &type, sizeof(type));
3344 mgmt_pending_remove(cmd);
3346 return err;
/* HCI request completion callback for start_discovery: on success move
 * to DISCOVERY_FINDING and, for LE/interleaved discovery, schedule the
 * delayed work that stops the LE scan after its timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	/* BR/EDR inquiry ends on its own; no LE scan to disable */
	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
/* Handle MGMT_OP_START_DISCOVERY: build and run an HCI request that
 * starts either a BR/EDR inquiry or an active LE scan (or both for
 * interleaved discovery), depending on the requested discovery type.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is not allowed here */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3537 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3539 struct pending_cmd *cmd;
3540 int err;
3542 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3543 if (!cmd)
3544 return -ENOENT;
3546 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3547 &hdev->discovery.type, sizeof(hdev->discovery.type));
3548 mgmt_pending_remove(cmd);
3550 return err;
3553 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3555 BT_DBG("status %d", status);
3557 hci_dev_lock(hdev);
3559 if (status) {
3560 mgmt_stop_discovery_failed(hdev, status);
3561 goto unlock;
3564 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3566 unlock:
3567 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: cancel whatever phase discovery is in —
 * an ongoing inquiry, an LE scan, or a pending remote-name resolution.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan in progress: stop the timed disable work
			 * and queue an explicit scan disable instead.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			/* No name request outstanding; nothing to cancel */
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_CONFIRM_NAME: user space confirms whether the name of a
 * discovered device is already known. Known names leave the resolve
 * list; unknown ones are queued for name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3700 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3701 u16 len)
3703 struct mgmt_cp_block_device *cp = data;
3704 u8 status;
3705 int err;
3707 BT_DBG("%s", hdev->name);
3709 if (!bdaddr_type_is_valid(cp->addr.type))
3710 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3711 MGMT_STATUS_INVALID_PARAMS,
3712 &cp->addr, sizeof(cp->addr));
3714 hci_dev_lock(hdev);
3716 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3717 if (err < 0)
3718 status = MGMT_STATUS_FAILED;
3719 else
3720 status = MGMT_STATUS_SUCCESS;
3722 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3723 &cp->addr, sizeof(cp->addr));
3725 hci_dev_unlock(hdev);
3727 return err;
3730 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3731 u16 len)
3733 struct mgmt_cp_unblock_device *cp = data;
3734 u8 status;
3735 int err;
3737 BT_DBG("%s", hdev->name);
3739 if (!bdaddr_type_is_valid(cp->addr.type))
3740 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3741 MGMT_STATUS_INVALID_PARAMS,
3742 &cp->addr, sizeof(cp->addr));
3744 hci_dev_lock(hdev);
3746 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3747 if (err < 0)
3748 status = MGMT_STATUS_INVALID_PARAMS;
3749 else
3750 status = MGMT_STATUS_SUCCESS;
3752 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3753 &cp->addr, sizeof(cp->addr));
3755 hci_dev_unlock(hdev);
3757 return err;
3760 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3761 u16 len)
3763 struct mgmt_cp_set_device_id *cp = data;
3764 struct hci_request req;
3765 int err;
3766 __u16 source;
3768 BT_DBG("%s", hdev->name);
3770 source = __le16_to_cpu(cp->source);
3772 if (source > 0x0002)
3773 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3774 MGMT_STATUS_INVALID_PARAMS);
3776 hci_dev_lock(hdev);
3778 hdev->devid_source = source;
3779 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3780 hdev->devid_product = __le16_to_cpu(cp->product);
3781 hdev->devid_version = __le16_to_cpu(cp->version);
3783 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3785 hci_req_init(&req, hdev);
3786 update_eir(&req);
3787 hci_req_run(&req, NULL);
3789 hci_dev_unlock(hdev);
3791 return err;
3794 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3796 struct cmd_lookup match = { NULL, hdev };
3798 if (status) {
3799 u8 mgmt_err = mgmt_status(status);
3801 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3802 cmd_status_rsp, &mgmt_err);
3803 return;
3806 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3807 &match);
3809 new_settings(hdev, match.sk);
3811 if (match.sk)
3812 sock_put(match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. When no HCI
 * traffic is needed (powered off, unchanged value, or an LE connection
 * exists) only the flag is toggled and a direct response is sent.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Don't race with another advertising or LE state change */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3893 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3894 void *data, u16 len)
3896 struct mgmt_cp_set_static_address *cp = data;
3897 int err;
3899 BT_DBG("%s", hdev->name);
3901 if (!lmp_le_capable(hdev))
3902 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3903 MGMT_STATUS_NOT_SUPPORTED);
3905 if (hdev_is_powered(hdev))
3906 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3907 MGMT_STATUS_REJECTED);
3909 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3910 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3911 return cmd_status(sk, hdev->id,
3912 MGMT_OP_SET_STATIC_ADDRESS,
3913 MGMT_STATUS_INVALID_PARAMS);
3915 /* Two most significant bits shall be set */
3916 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3917 return cmd_status(sk, hdev->id,
3918 MGMT_OP_SET_STATIC_ADDRESS,
3919 MGMT_STATUS_INVALID_PARAMS);
3922 hci_dev_lock(hdev);
3924 bacpy(&hdev->static_addr, &cp->bdaddr);
3926 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3928 hci_dev_unlock(hdev);
3930 return err;
3933 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3934 void *data, u16 len)
3936 struct mgmt_cp_set_scan_params *cp = data;
3937 __u16 interval, window;
3938 int err;
3940 BT_DBG("%s", hdev->name);
3942 if (!lmp_le_capable(hdev))
3943 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3944 MGMT_STATUS_NOT_SUPPORTED);
3946 interval = __le16_to_cpu(cp->interval);
3948 if (interval < 0x0004 || interval > 0x4000)
3949 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3950 MGMT_STATUS_INVALID_PARAMS);
3952 window = __le16_to_cpu(cp->window);
3954 if (window < 0x0004 || window > 0x4000)
3955 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3956 MGMT_STATUS_INVALID_PARAMS);
3958 if (window > interval)
3959 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 hci_dev_lock(hdev);
3964 hdev->le_scan_interval = interval;
3965 hdev->le_scan_window = window;
3967 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3969 /* If background scan is running, restart it so new parameters are
3970 * loaded.
3972 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3973 hdev->discovery.state == DISCOVERY_STOPPED) {
3974 struct hci_request req;
3976 hci_req_init(&req, hdev);
3978 hci_req_add_le_scan_disable(&req);
3979 hci_req_add_le_passive_scan(&req);
3981 hci_req_run(&req, NULL);
3984 hci_dev_unlock(hdev);
3986 return err;
3989 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3991 struct pending_cmd *cmd;
3993 BT_DBG("status 0x%02x", status);
3995 hci_dev_lock(hdev);
3997 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3998 if (!cmd)
3999 goto unlock;
4001 if (status) {
4002 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4003 mgmt_status(status));
4004 } else {
4005 struct mgmt_mode *cp = cmd->param;
4007 if (cp->val)
4008 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4009 else
4010 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4012 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4013 new_settings(hdev, cmd->sk);
4016 mgmt_pending_remove(cmd);
4018 unlock:
4019 hci_dev_unlock(hdev);
4022 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4023 void *data, u16 len)
4025 struct mgmt_mode *cp = data;
4026 struct pending_cmd *cmd;
4027 struct hci_request req;
4028 int err;
4030 BT_DBG("%s", hdev->name);
4032 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4033 hdev->hci_ver < BLUETOOTH_VER_1_2)
4034 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4035 MGMT_STATUS_NOT_SUPPORTED);
4037 if (cp->val != 0x00 && cp->val != 0x01)
4038 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4039 MGMT_STATUS_INVALID_PARAMS);
4041 if (!hdev_is_powered(hdev))
4042 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4043 MGMT_STATUS_NOT_POWERED);
4045 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4046 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4047 MGMT_STATUS_REJECTED);
4049 hci_dev_lock(hdev);
4051 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4052 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4053 MGMT_STATUS_BUSY);
4054 goto unlock;
4057 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4058 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4059 hdev);
4060 goto unlock;
4063 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4064 data, len);
4065 if (!cmd) {
4066 err = -ENOMEM;
4067 goto unlock;
4070 hci_req_init(&req, hdev);
4072 write_fast_connectable(&req, cp->val);
4074 err = hci_req_run(&req, fast_connectable_complete);
4075 if (err < 0) {
4076 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4077 MGMT_STATUS_FAILED);
4078 mgmt_pending_remove(cmd);
4081 unlock:
4082 hci_dev_unlock(hdev);
4084 return err;
4087 static void set_bredr_scan(struct hci_request *req)
4089 struct hci_dev *hdev = req->hdev;
4090 u8 scan = 0;
4092 /* Ensure that fast connectable is disabled. This function will
4093 * not do anything if the page scan parameters are already what
4094 * they should be.
4096 write_fast_connectable(req, false);
4098 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4099 scan |= SCAN_PAGE;
4100 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4101 scan |= SCAN_INQUIRY;
4103 if (scan)
4104 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4107 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4109 struct pending_cmd *cmd;
4111 BT_DBG("status 0x%02x", status);
4113 hci_dev_lock(hdev);
4115 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4116 if (!cmd)
4117 goto unlock;
4119 if (status) {
4120 u8 mgmt_err = mgmt_status(status);
4122 /* We need to restore the flag if related HCI commands
4123 * failed.
4125 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4127 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4128 } else {
4129 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4130 new_settings(hdev, cmd->sk);
4133 mgmt_pending_remove(cmd);
4135 unlock:
4136 hci_dev_unlock(hdev);
4139 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4141 struct mgmt_mode *cp = data;
4142 struct pending_cmd *cmd;
4143 struct hci_request req;
4144 int err;
4146 BT_DBG("request for %s", hdev->name);
4148 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4149 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4150 MGMT_STATUS_NOT_SUPPORTED);
4152 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4153 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4154 MGMT_STATUS_REJECTED);
4156 if (cp->val != 0x00 && cp->val != 0x01)
4157 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4158 MGMT_STATUS_INVALID_PARAMS);
4160 hci_dev_lock(hdev);
4162 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4163 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4164 goto unlock;
4167 if (!hdev_is_powered(hdev)) {
4168 if (!cp->val) {
4169 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4170 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4171 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4172 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4173 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4176 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4178 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4179 if (err < 0)
4180 goto unlock;
4182 err = new_settings(hdev, sk);
4183 goto unlock;
4186 /* Reject disabling when powered on */
4187 if (!cp->val) {
4188 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4189 MGMT_STATUS_REJECTED);
4190 goto unlock;
4193 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4194 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4195 MGMT_STATUS_BUSY);
4196 goto unlock;
4199 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4200 if (!cmd) {
4201 err = -ENOMEM;
4202 goto unlock;
4205 /* We need to flip the bit already here so that update_adv_data
4206 * generates the correct flags.
4208 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4210 hci_req_init(&req, hdev);
4212 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4213 set_bredr_scan(&req);
4215 /* Since only the advertising data flags will change, there
4216 * is no need to update the scan response data.
4218 update_adv_data(&req);
4220 err = hci_req_run(&req, set_bredr_complete);
4221 if (err < 0)
4222 mgmt_pending_remove(cmd);
4224 unlock:
4225 hci_dev_unlock(hdev);
4226 return err;
4229 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4230 void *data, u16 len)
4232 struct mgmt_mode *cp = data;
4233 struct pending_cmd *cmd;
4234 u8 val, status;
4235 int err;
4237 BT_DBG("request for %s", hdev->name);
4239 status = mgmt_bredr_support(hdev);
4240 if (status)
4241 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4242 status);
4244 if (!lmp_sc_capable(hdev) &&
4245 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4246 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4247 MGMT_STATUS_NOT_SUPPORTED);
4249 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4250 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4251 MGMT_STATUS_INVALID_PARAMS);
4253 hci_dev_lock(hdev);
4255 if (!hdev_is_powered(hdev)) {
4256 bool changed;
4258 if (cp->val) {
4259 changed = !test_and_set_bit(HCI_SC_ENABLED,
4260 &hdev->dev_flags);
4261 if (cp->val == 0x02)
4262 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4263 else
4264 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4265 } else {
4266 changed = test_and_clear_bit(HCI_SC_ENABLED,
4267 &hdev->dev_flags);
4268 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4271 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4272 if (err < 0)
4273 goto failed;
4275 if (changed)
4276 err = new_settings(hdev, sk);
4278 goto failed;
4281 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4282 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4283 MGMT_STATUS_BUSY);
4284 goto failed;
4287 val = !!cp->val;
4289 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4290 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4291 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4292 goto failed;
4295 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4296 if (!cmd) {
4297 err = -ENOMEM;
4298 goto failed;
4301 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4302 if (err < 0) {
4303 mgmt_pending_remove(cmd);
4304 goto failed;
4307 if (cp->val == 0x02)
4308 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4309 else
4310 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4312 failed:
4313 hci_dev_unlock(hdev);
4314 return err;
4317 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4318 void *data, u16 len)
4320 struct mgmt_mode *cp = data;
4321 bool changed;
4322 int err;
4324 BT_DBG("request for %s", hdev->name);
4326 if (cp->val != 0x00 && cp->val != 0x01)
4327 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4328 MGMT_STATUS_INVALID_PARAMS);
4330 hci_dev_lock(hdev);
4332 if (cp->val)
4333 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4334 else
4335 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4337 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4338 if (err < 0)
4339 goto unlock;
4341 if (changed)
4342 err = new_settings(hdev, sk);
4344 unlock:
4345 hci_dev_unlock(hdev);
4346 return err;
4349 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4350 u16 len)
4352 struct mgmt_cp_set_privacy *cp = cp_data;
4353 bool changed;
4354 int err;
4356 BT_DBG("request for %s", hdev->name);
4358 if (!lmp_le_capable(hdev))
4359 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4360 MGMT_STATUS_NOT_SUPPORTED);
4362 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4363 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4364 MGMT_STATUS_INVALID_PARAMS);
4366 if (hdev_is_powered(hdev))
4367 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4368 MGMT_STATUS_REJECTED);
4370 hci_dev_lock(hdev);
4372 /* If user space supports this command it is also expected to
4373 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4375 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4377 if (cp->privacy) {
4378 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4379 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4380 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4381 } else {
4382 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4383 memset(hdev->irk, 0, sizeof(hdev->irk));
4384 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4387 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4388 if (err < 0)
4389 goto unlock;
4391 if (changed)
4392 err = new_settings(hdev, sk);
4394 unlock:
4395 hci_dev_unlock(hdev);
4396 return err;
4399 static bool irk_is_valid(struct mgmt_irk_info *irk)
4401 switch (irk->addr.type) {
4402 case BDADDR_LE_PUBLIC:
4403 return true;
4405 case BDADDR_LE_RANDOM:
4406 /* Two most significant bits shall be set */
4407 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4408 return false;
4409 return true;
4412 return false;
4415 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4416 u16 len)
4418 struct mgmt_cp_load_irks *cp = cp_data;
4419 u16 irk_count, expected_len;
4420 int i, err;
4422 BT_DBG("request for %s", hdev->name);
4424 if (!lmp_le_capable(hdev))
4425 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4426 MGMT_STATUS_NOT_SUPPORTED);
4428 irk_count = __le16_to_cpu(cp->irk_count);
4430 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4431 if (expected_len != len) {
4432 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4433 expected_len, len);
4434 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4435 MGMT_STATUS_INVALID_PARAMS);
4438 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4440 for (i = 0; i < irk_count; i++) {
4441 struct mgmt_irk_info *key = &cp->irks[i];
4443 if (!irk_is_valid(key))
4444 return cmd_status(sk, hdev->id,
4445 MGMT_OP_LOAD_IRKS,
4446 MGMT_STATUS_INVALID_PARAMS);
4449 hci_dev_lock(hdev);
4451 hci_smp_irks_clear(hdev);
4453 for (i = 0; i < irk_count; i++) {
4454 struct mgmt_irk_info *irk = &cp->irks[i];
4455 u8 addr_type;
4457 if (irk->addr.type == BDADDR_LE_PUBLIC)
4458 addr_type = ADDR_LE_DEV_PUBLIC;
4459 else
4460 addr_type = ADDR_LE_DEV_RANDOM;
4462 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4463 BDADDR_ANY);
4466 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4468 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4470 hci_dev_unlock(hdev);
4472 return err;
4475 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4477 if (key->master != 0x00 && key->master != 0x01)
4478 return false;
4480 switch (key->addr.type) {
4481 case BDADDR_LE_PUBLIC:
4482 return true;
4484 case BDADDR_LE_RANDOM:
4485 /* Two most significant bits shall be set */
4486 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4487 return false;
4488 return true;
4491 return false;
4494 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4495 void *cp_data, u16 len)
4497 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4498 u16 key_count, expected_len;
4499 int i, err;
4501 BT_DBG("request for %s", hdev->name);
4503 if (!lmp_le_capable(hdev))
4504 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4505 MGMT_STATUS_NOT_SUPPORTED);
4507 key_count = __le16_to_cpu(cp->key_count);
4509 expected_len = sizeof(*cp) + key_count *
4510 sizeof(struct mgmt_ltk_info);
4511 if (expected_len != len) {
4512 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4513 expected_len, len);
4514 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4515 MGMT_STATUS_INVALID_PARAMS);
4518 BT_DBG("%s key_count %u", hdev->name, key_count);
4520 for (i = 0; i < key_count; i++) {
4521 struct mgmt_ltk_info *key = &cp->keys[i];
4523 if (!ltk_is_valid(key))
4524 return cmd_status(sk, hdev->id,
4525 MGMT_OP_LOAD_LONG_TERM_KEYS,
4526 MGMT_STATUS_INVALID_PARAMS);
4529 hci_dev_lock(hdev);
4531 hci_smp_ltks_clear(hdev);
4533 for (i = 0; i < key_count; i++) {
4534 struct mgmt_ltk_info *key = &cp->keys[i];
4535 u8 type, addr_type;
4537 if (key->addr.type == BDADDR_LE_PUBLIC)
4538 addr_type = ADDR_LE_DEV_PUBLIC;
4539 else
4540 addr_type = ADDR_LE_DEV_RANDOM;
4542 if (key->master)
4543 type = HCI_SMP_LTK;
4544 else
4545 type = HCI_SMP_LTK_SLAVE;
4547 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4548 key->type, key->val, key->enc_size, key->ediv,
4549 key->rand);
4552 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4553 NULL, 0);
4555 hci_dev_unlock(hdev);
4557 return err;
4560 static const struct mgmt_handler {
4561 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4562 u16 data_len);
4563 bool var_len;
4564 size_t data_len;
4565 } mgmt_handlers[] = {
4566 { NULL }, /* 0x0000 (no command) */
4567 { read_version, false, MGMT_READ_VERSION_SIZE },
4568 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4569 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4570 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4571 { set_powered, false, MGMT_SETTING_SIZE },
4572 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4573 { set_connectable, false, MGMT_SETTING_SIZE },
4574 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4575 { set_pairable, false, MGMT_SETTING_SIZE },
4576 { set_link_security, false, MGMT_SETTING_SIZE },
4577 { set_ssp, false, MGMT_SETTING_SIZE },
4578 { set_hs, false, MGMT_SETTING_SIZE },
4579 { set_le, false, MGMT_SETTING_SIZE },
4580 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4581 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4582 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4583 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4584 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4585 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4586 { disconnect, false, MGMT_DISCONNECT_SIZE },
4587 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4588 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4589 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4590 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4591 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4592 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4593 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4594 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4595 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4596 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4597 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4598 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4599 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4600 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4601 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4602 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4603 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4604 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4605 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4606 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4607 { set_advertising, false, MGMT_SETTING_SIZE },
4608 { set_bredr, false, MGMT_SETTING_SIZE },
4609 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4610 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4611 { set_secure_conn, false, MGMT_SETTING_SIZE },
4612 { set_debug_keys, false, MGMT_SETTING_SIZE },
4613 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4614 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4618 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4620 void *buf;
4621 u8 *cp;
4622 struct mgmt_hdr *hdr;
4623 u16 opcode, index, len;
4624 struct hci_dev *hdev = NULL;
4625 const struct mgmt_handler *handler;
4626 int err;
4628 BT_DBG("got %zu bytes", msglen);
4630 if (msglen < sizeof(*hdr))
4631 return -EINVAL;
4633 buf = kmalloc(msglen, GFP_KERNEL);
4634 if (!buf)
4635 return -ENOMEM;
4637 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4638 err = -EFAULT;
4639 goto done;
4642 hdr = buf;
4643 opcode = __le16_to_cpu(hdr->opcode);
4644 index = __le16_to_cpu(hdr->index);
4645 len = __le16_to_cpu(hdr->len);
4647 if (len != msglen - sizeof(*hdr)) {
4648 err = -EINVAL;
4649 goto done;
4652 if (index != MGMT_INDEX_NONE) {
4653 hdev = hci_dev_get(index);
4654 if (!hdev) {
4655 err = cmd_status(sk, index, opcode,
4656 MGMT_STATUS_INVALID_INDEX);
4657 goto done;
4660 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4661 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4662 err = cmd_status(sk, index, opcode,
4663 MGMT_STATUS_INVALID_INDEX);
4664 goto done;
4668 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4669 mgmt_handlers[opcode].func == NULL) {
4670 BT_DBG("Unknown op %u", opcode);
4671 err = cmd_status(sk, index, opcode,
4672 MGMT_STATUS_UNKNOWN_COMMAND);
4673 goto done;
4676 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4677 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4678 err = cmd_status(sk, index, opcode,
4679 MGMT_STATUS_INVALID_INDEX);
4680 goto done;
4683 handler = &mgmt_handlers[opcode];
4685 if ((handler->var_len && len < handler->data_len) ||
4686 (!handler->var_len && len != handler->data_len)) {
4687 err = cmd_status(sk, index, opcode,
4688 MGMT_STATUS_INVALID_PARAMS);
4689 goto done;
4692 if (hdev)
4693 mgmt_init_hdev(sk, hdev);
4695 cp = buf + sizeof(*hdr);
4697 err = handler->func(sk, hdev, cp, len);
4698 if (err < 0)
4699 goto done;
4701 err = msglen;
4703 done:
4704 if (hdev)
4705 hci_dev_put(hdev);
4707 kfree(buf);
4708 return err;
4711 void mgmt_index_added(struct hci_dev *hdev)
4713 if (hdev->dev_type != HCI_BREDR)
4714 return;
4716 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4719 void mgmt_index_removed(struct hci_dev *hdev)
4721 u8 status = MGMT_STATUS_INVALID_INDEX;
4723 if (hdev->dev_type != HCI_BREDR)
4724 return;
4726 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4728 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4731 /* This function requires the caller holds hdev->lock */
4732 static void restart_le_auto_conns(struct hci_dev *hdev)
4734 struct hci_conn_params *p;
4736 list_for_each_entry(p, &hdev->le_conn_params, list) {
4737 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4738 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4742 static void powered_complete(struct hci_dev *hdev, u8 status)
4744 struct cmd_lookup match = { NULL, hdev };
4746 BT_DBG("status 0x%02x", status);
4748 hci_dev_lock(hdev);
4750 restart_le_auto_conns(hdev);
4752 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4754 new_settings(hdev, match.sk);
4756 hci_dev_unlock(hdev);
4758 if (match.sk)
4759 sock_put(match.sk);
4762 static int powered_update_hci(struct hci_dev *hdev)
4764 struct hci_request req;
4765 u8 link_sec;
4767 hci_req_init(&req, hdev);
4769 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4770 !lmp_host_ssp_capable(hdev)) {
4771 u8 ssp = 1;
4773 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4776 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4777 lmp_bredr_capable(hdev)) {
4778 struct hci_cp_write_le_host_supported cp;
4780 cp.le = 1;
4781 cp.simul = lmp_le_br_capable(hdev);
4783 /* Check first if we already have the right
4784 * host state (host features set)
4786 if (cp.le != lmp_host_le_capable(hdev) ||
4787 cp.simul != lmp_host_le_br_capable(hdev))
4788 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4789 sizeof(cp), &cp);
4792 if (lmp_le_capable(hdev)) {
4793 /* Make sure the controller has a good default for
4794 * advertising data. This also applies to the case
4795 * where BR/EDR was toggled during the AUTO_OFF phase.
4797 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4798 update_adv_data(&req);
4799 update_scan_rsp_data(&req);
4802 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4803 enable_advertising(&req);
4806 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4807 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4808 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4809 sizeof(link_sec), &link_sec);
4811 if (lmp_bredr_capable(hdev)) {
4812 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4813 set_bredr_scan(&req);
4814 update_class(&req);
4815 update_name(&req);
4816 update_eir(&req);
4819 return hci_req_run(&req, powered_complete);
4822 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4824 struct cmd_lookup match = { NULL, hdev };
4825 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4826 u8 zero_cod[] = { 0, 0, 0 };
4827 int err;
4829 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4830 return 0;
4832 if (powered) {
4833 if (powered_update_hci(hdev) == 0)
4834 return 0;
4836 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4837 &match);
4838 goto new_settings;
4841 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4842 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4844 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4845 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4846 zero_cod, sizeof(zero_cod), NULL);
4848 new_settings:
4849 err = new_settings(hdev, match.sk);
4851 if (match.sk)
4852 sock_put(match.sk);
4854 return err;
4857 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4859 struct pending_cmd *cmd;
4860 u8 status;
4862 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4863 if (!cmd)
4864 return;
4866 if (err == -ERFKILL)
4867 status = MGMT_STATUS_RFKILLED;
4868 else
4869 status = MGMT_STATUS_FAILED;
4871 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4873 mgmt_pending_remove(cmd);
4876 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4878 struct hci_request req;
4880 hci_dev_lock(hdev);
4882 /* When discoverable timeout triggers, then just make sure
4883 * the limited discoverable flag is cleared. Even in the case
4884 * of a timeout triggered from general discoverable, it is
4885 * safe to unconditionally clear the flag.
4887 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4888 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4890 hci_req_init(&req, hdev);
4891 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4892 u8 scan = SCAN_PAGE;
4893 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4894 sizeof(scan), &scan);
4896 update_class(&req);
4897 update_adv_data(&req);
4898 hci_req_run(&req, NULL);
4900 hdev->discov_timeout = 0;
4902 new_settings(hdev, NULL);
4904 hci_dev_unlock(hdev);
4907 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4909 bool changed;
4911 /* Nothing needed here if there's a pending command since that
4912 * commands request completion callback takes care of everything
4913 * necessary.
4915 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4916 return;
4918 /* Powering off may clear the scan mode - don't let that interfere */
4919 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4920 return;
4922 if (discoverable) {
4923 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4924 } else {
4925 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4926 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4929 if (changed) {
4930 struct hci_request req;
4932 /* In case this change in discoverable was triggered by
4933 * a disabling of connectable there could be a need to
4934 * update the advertising flags.
4936 hci_req_init(&req, hdev);
4937 update_adv_data(&req);
4938 hci_req_run(&req, NULL);
4940 new_settings(hdev, NULL);
4944 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4946 bool changed;
4948 /* Nothing needed here if there's a pending command since that
4949 * commands request completion callback takes care of everything
4950 * necessary.
4952 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4953 return;
4955 /* Powering off may clear the scan mode - don't let that interfere */
4956 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4957 return;
4959 if (connectable)
4960 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4961 else
4962 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4964 if (changed)
4965 new_settings(hdev, NULL);
4968 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4970 /* Powering off may stop advertising - don't let that interfere */
4971 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4972 return;
4974 if (advertising)
4975 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4976 else
4977 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4980 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4982 u8 mgmt_err = mgmt_status(status);
4984 if (scan & SCAN_PAGE)
4985 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4986 cmd_status_rsp, &mgmt_err);
4988 if (scan & SCAN_INQUIRY)
4989 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4990 cmd_status_rsp, &mgmt_err);
4993 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4994 bool persistent)
4996 struct mgmt_ev_new_link_key ev;
4998 memset(&ev, 0, sizeof(ev));
5000 ev.store_hint = persistent;
5001 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5002 ev.key.addr.type = BDADDR_BREDR;
5003 ev.key.type = key->type;
5004 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5005 ev.key.pin_len = key->pin_len;
5007 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5010 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5012 struct mgmt_ev_new_long_term_key ev;
5014 memset(&ev, 0, sizeof(ev));
5016 /* Devices using resolvable or non-resolvable random addresses
5017 * without providing an indentity resolving key don't require
5018 * to store long term keys. Their addresses will change the
5019 * next time around.
5021 * Only when a remote device provides an identity address
5022 * make sure the long term key is stored. If the remote
5023 * identity is known, the long term keys are internally
5024 * mapped to the identity address. So allow static random
5025 * and public addresses here.
5027 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5028 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5029 ev.store_hint = 0x00;
5030 else
5031 ev.store_hint = persistent;
5033 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5034 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5035 ev.key.type = key->authenticated;
5036 ev.key.enc_size = key->enc_size;
5037 ev.key.ediv = key->ediv;
5038 ev.key.rand = key->rand;
5040 if (key->type == HCI_SMP_LTK)
5041 ev.key.master = 1;
5043 memcpy(ev.key.val, key->val, sizeof(key->val));
5045 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
5048 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5050 struct mgmt_ev_new_irk ev;
5052 memset(&ev, 0, sizeof(ev));
5054 /* For identity resolving keys from devices that are already
5055 * using a public address or static random address, do not
5056 * ask for storing this key. The identity resolving key really
5057 * is only mandatory for devices using resovlable random
5058 * addresses.
5060 * Storing all identity resolving keys has the downside that
5061 * they will be also loaded on next boot of they system. More
5062 * identity resolving keys, means more time during scanning is
5063 * needed to actually resolve these addresses.
5065 if (bacmp(&irk->rpa, BDADDR_ANY))
5066 ev.store_hint = 0x01;
5067 else
5068 ev.store_hint = 0x00;
5070 bacpy(&ev.rpa, &irk->rpa);
5071 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5072 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5073 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5075 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5078 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5079 bool persistent)
5081 struct mgmt_ev_new_csrk ev;
5083 memset(&ev, 0, sizeof(ev));
5085 /* Devices using resolvable or non-resolvable random addresses
5086 * without providing an indentity resolving key don't require
5087 * to store signature resolving keys. Their addresses will change
5088 * the next time around.
5090 * Only when a remote device provides an identity address
5091 * make sure the signature resolving key is stored. So allow
5092 * static random and public addresses here.
5094 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5095 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5096 ev.store_hint = 0x00;
5097 else
5098 ev.store_hint = persistent;
5100 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5101 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5102 ev.key.master = csrk->master;
5103 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5105 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5108 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5109 u8 data_len)
5111 eir[eir_len++] = sizeof(type) + data_len;
5112 eir[eir_len++] = type;
5113 memcpy(&eir[eir_len], data, data_len);
5114 eir_len += data_len;
5116 return eir_len;
5119 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5120 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5121 u8 *dev_class)
5123 char buf[512];
5124 struct mgmt_ev_device_connected *ev = (void *) buf;
5125 u16 eir_len = 0;
5127 bacpy(&ev->addr.bdaddr, bdaddr);
5128 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5130 ev->flags = __cpu_to_le32(flags);
5132 if (name_len > 0)
5133 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
5134 name, name_len);
5136 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5137 eir_len = eir_append_data(ev->eir, eir_len,
5138 EIR_CLASS_OF_DEV, dev_class, 3);
5140 ev->eir_len = cpu_to_le16(eir_len);
5142 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5143 sizeof(*ev) + eir_len, NULL);
5146 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5148 struct mgmt_cp_disconnect *cp = cmd->param;
5149 struct sock **sk = data;
5150 struct mgmt_rp_disconnect rp;
5152 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5153 rp.addr.type = cp->addr.type;
5155 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5156 sizeof(rp));
5158 *sk = cmd->sk;
5159 sock_hold(*sk);
5161 mgmt_pending_remove(cmd);
5164 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5166 struct hci_dev *hdev = data;
5167 struct mgmt_cp_unpair_device *cp = cmd->param;
5168 struct mgmt_rp_unpair_device rp;
5170 memset(&rp, 0, sizeof(rp));
5171 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5172 rp.addr.type = cp->addr.type;
5174 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5176 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5178 mgmt_pending_remove(cmd);
5181 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5182 u8 link_type, u8 addr_type, u8 reason,
5183 bool mgmt_connected)
5185 struct mgmt_ev_device_disconnected ev;
5186 struct pending_cmd *power_off;
5187 struct sock *sk = NULL;
5189 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5190 if (power_off) {
5191 struct mgmt_mode *cp = power_off->param;
5193 /* The connection is still in hci_conn_hash so test for 1
5194 * instead of 0 to know if this is the last one.
5196 if (!cp->val && hci_conn_count(hdev) == 1) {
5197 cancel_delayed_work(&hdev->power_off);
5198 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5202 if (!mgmt_connected)
5203 return;
5205 if (link_type != ACL_LINK && link_type != LE_LINK)
5206 return;
5208 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5210 bacpy(&ev.addr.bdaddr, bdaddr);
5211 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5212 ev.reason = reason;
5214 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5216 if (sk)
5217 sock_put(sk);
5219 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5220 hdev);
5223 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5224 u8 link_type, u8 addr_type, u8 status)
5226 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5227 struct mgmt_cp_disconnect *cp;
5228 struct mgmt_rp_disconnect rp;
5229 struct pending_cmd *cmd;
5231 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5232 hdev);
5234 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5235 if (!cmd)
5236 return;
5238 cp = cmd->param;
5240 if (bacmp(bdaddr, &cp->addr.bdaddr))
5241 return;
5243 if (cp->addr.type != bdaddr_type)
5244 return;
5246 bacpy(&rp.addr.bdaddr, bdaddr);
5247 rp.addr.type = bdaddr_type;
5249 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5250 mgmt_status(status), &rp, sizeof(rp));
5252 mgmt_pending_remove(cmd);
5255 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5256 u8 addr_type, u8 status)
5258 struct mgmt_ev_connect_failed ev;
5259 struct pending_cmd *power_off;
5261 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5262 if (power_off) {
5263 struct mgmt_mode *cp = power_off->param;
5265 /* The connection is still in hci_conn_hash so test for 1
5266 * instead of 0 to know if this is the last one.
5268 if (!cp->val && hci_conn_count(hdev) == 1) {
5269 cancel_delayed_work(&hdev->power_off);
5270 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5274 bacpy(&ev.addr.bdaddr, bdaddr);
5275 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5276 ev.status = mgmt_status(status);
5278 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5281 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5283 struct mgmt_ev_pin_code_request ev;
5285 bacpy(&ev.addr.bdaddr, bdaddr);
5286 ev.addr.type = BDADDR_BREDR;
5287 ev.secure = secure;
5289 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5292 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5293 u8 status)
5295 struct pending_cmd *cmd;
5296 struct mgmt_rp_pin_code_reply rp;
5298 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5299 if (!cmd)
5300 return;
5302 bacpy(&rp.addr.bdaddr, bdaddr);
5303 rp.addr.type = BDADDR_BREDR;
5305 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5306 mgmt_status(status), &rp, sizeof(rp));
5308 mgmt_pending_remove(cmd);
5311 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5312 u8 status)
5314 struct pending_cmd *cmd;
5315 struct mgmt_rp_pin_code_reply rp;
5317 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5318 if (!cmd)
5319 return;
5321 bacpy(&rp.addr.bdaddr, bdaddr);
5322 rp.addr.type = BDADDR_BREDR;
5324 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5325 mgmt_status(status), &rp, sizeof(rp));
5327 mgmt_pending_remove(cmd);
5330 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5331 u8 link_type, u8 addr_type, u32 value,
5332 u8 confirm_hint)
5334 struct mgmt_ev_user_confirm_request ev;
5336 BT_DBG("%s", hdev->name);
5338 bacpy(&ev.addr.bdaddr, bdaddr);
5339 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5340 ev.confirm_hint = confirm_hint;
5341 ev.value = cpu_to_le32(value);
5343 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5344 NULL);
5347 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5348 u8 link_type, u8 addr_type)
5350 struct mgmt_ev_user_passkey_request ev;
5352 BT_DBG("%s", hdev->name);
5354 bacpy(&ev.addr.bdaddr, bdaddr);
5355 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5357 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5358 NULL);
5361 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5362 u8 link_type, u8 addr_type, u8 status,
5363 u8 opcode)
5365 struct pending_cmd *cmd;
5366 struct mgmt_rp_user_confirm_reply rp;
5367 int err;
5369 cmd = mgmt_pending_find(opcode, hdev);
5370 if (!cmd)
5371 return -ENOENT;
5373 bacpy(&rp.addr.bdaddr, bdaddr);
5374 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5375 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5376 &rp, sizeof(rp));
5378 mgmt_pending_remove(cmd);
5380 return err;
5383 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5384 u8 link_type, u8 addr_type, u8 status)
5386 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5387 status, MGMT_OP_USER_CONFIRM_REPLY);
5390 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5391 u8 link_type, u8 addr_type, u8 status)
5393 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5394 status,
5395 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5398 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5399 u8 link_type, u8 addr_type, u8 status)
5401 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5402 status, MGMT_OP_USER_PASSKEY_REPLY);
5405 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5406 u8 link_type, u8 addr_type, u8 status)
5408 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5409 status,
5410 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5413 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5414 u8 link_type, u8 addr_type, u32 passkey,
5415 u8 entered)
5417 struct mgmt_ev_passkey_notify ev;
5419 BT_DBG("%s", hdev->name);
5421 bacpy(&ev.addr.bdaddr, bdaddr);
5422 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5423 ev.passkey = __cpu_to_le32(passkey);
5424 ev.entered = entered;
5426 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5429 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5430 u8 addr_type, u8 status)
5432 struct mgmt_ev_auth_failed ev;
5434 bacpy(&ev.addr.bdaddr, bdaddr);
5435 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5436 ev.status = mgmt_status(status);
5438 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5441 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5443 struct cmd_lookup match = { NULL, hdev };
5444 bool changed;
5446 if (status) {
5447 u8 mgmt_err = mgmt_status(status);
5448 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5449 cmd_status_rsp, &mgmt_err);
5450 return;
5453 if (test_bit(HCI_AUTH, &hdev->flags))
5454 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5455 &hdev->dev_flags);
5456 else
5457 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5458 &hdev->dev_flags);
5460 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5461 &match);
5463 if (changed)
5464 new_settings(hdev, match.sk);
5466 if (match.sk)
5467 sock_put(match.sk);
5470 static void clear_eir(struct hci_request *req)
5472 struct hci_dev *hdev = req->hdev;
5473 struct hci_cp_write_eir cp;
5475 if (!lmp_ext_inq_capable(hdev))
5476 return;
5478 memset(hdev->eir, 0, sizeof(hdev->eir));
5480 memset(&cp, 0, sizeof(cp));
5482 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5485 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5487 struct cmd_lookup match = { NULL, hdev };
5488 struct hci_request req;
5489 bool changed = false;
5491 if (status) {
5492 u8 mgmt_err = mgmt_status(status);
5494 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5495 &hdev->dev_flags)) {
5496 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5497 new_settings(hdev, NULL);
5500 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5501 &mgmt_err);
5502 return;
5505 if (enable) {
5506 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5507 } else {
5508 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5509 if (!changed)
5510 changed = test_and_clear_bit(HCI_HS_ENABLED,
5511 &hdev->dev_flags);
5512 else
5513 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5516 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5518 if (changed)
5519 new_settings(hdev, match.sk);
5521 if (match.sk)
5522 sock_put(match.sk);
5524 hci_req_init(&req, hdev);
5526 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5527 update_eir(&req);
5528 else
5529 clear_eir(&req);
5531 hci_req_run(&req, NULL);
5534 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5536 struct cmd_lookup match = { NULL, hdev };
5537 bool changed = false;
5539 if (status) {
5540 u8 mgmt_err = mgmt_status(status);
5542 if (enable) {
5543 if (test_and_clear_bit(HCI_SC_ENABLED,
5544 &hdev->dev_flags))
5545 new_settings(hdev, NULL);
5546 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5549 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5550 cmd_status_rsp, &mgmt_err);
5551 return;
5554 if (enable) {
5555 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5556 } else {
5557 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5558 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5561 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5562 settings_rsp, &match);
5564 if (changed)
5565 new_settings(hdev, match.sk);
5567 if (match.sk)
5568 sock_put(match.sk);
5571 static void sk_lookup(struct pending_cmd *cmd, void *data)
5573 struct cmd_lookup *match = data;
5575 if (match->sk == NULL) {
5576 match->sk = cmd->sk;
5577 sock_hold(match->sk);
5581 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5582 u8 status)
5584 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5586 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5587 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5588 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5590 if (!status)
5591 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5592 NULL);
5594 if (match.sk)
5595 sock_put(match.sk);
5598 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5600 struct mgmt_cp_set_local_name ev;
5601 struct pending_cmd *cmd;
5603 if (status)
5604 return;
5606 memset(&ev, 0, sizeof(ev));
5607 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5608 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5610 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5611 if (!cmd) {
5612 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5614 /* If this is a HCI command related to powering on the
5615 * HCI dev don't send any mgmt signals.
5617 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5618 return;
5621 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5622 cmd ? cmd->sk : NULL);
5625 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5626 u8 *randomizer192, u8 *hash256,
5627 u8 *randomizer256, u8 status)
5629 struct pending_cmd *cmd;
5631 BT_DBG("%s status %u", hdev->name, status);
5633 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5634 if (!cmd)
5635 return;
5637 if (status) {
5638 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5639 mgmt_status(status));
5640 } else {
5641 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5642 hash256 && randomizer256) {
5643 struct mgmt_rp_read_local_oob_ext_data rp;
5645 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5646 memcpy(rp.randomizer192, randomizer192,
5647 sizeof(rp.randomizer192));
5649 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5650 memcpy(rp.randomizer256, randomizer256,
5651 sizeof(rp.randomizer256));
5653 cmd_complete(cmd->sk, hdev->id,
5654 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5655 &rp, sizeof(rp));
5656 } else {
5657 struct mgmt_rp_read_local_oob_data rp;
5659 memcpy(rp.hash, hash192, sizeof(rp.hash));
5660 memcpy(rp.randomizer, randomizer192,
5661 sizeof(rp.randomizer));
5663 cmd_complete(cmd->sk, hdev->id,
5664 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5665 &rp, sizeof(rp));
5669 mgmt_pending_remove(cmd);
5672 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5673 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5674 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5675 u8 scan_rsp_len)
5677 char buf[512];
5678 struct mgmt_ev_device_found *ev = (void *) buf;
5679 struct smp_irk *irk;
5680 size_t ev_size;
5682 if (!hci_discovery_active(hdev))
5683 return;
5685 /* Make sure that the buffer is big enough. The 5 extra bytes
5686 * are for the potential CoD field.
5688 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5689 return;
5691 memset(buf, 0, sizeof(buf));
5693 irk = hci_get_irk(hdev, bdaddr, addr_type);
5694 if (irk) {
5695 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5696 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5697 } else {
5698 bacpy(&ev->addr.bdaddr, bdaddr);
5699 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5702 ev->rssi = rssi;
5703 if (cfm_name)
5704 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5705 if (!ssp)
5706 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5708 if (eir_len > 0)
5709 memcpy(ev->eir, eir, eir_len);
5711 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5712 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5713 dev_class, 3);
5715 if (scan_rsp_len > 0)
5716 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5718 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5719 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5721 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5724 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5725 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5727 struct mgmt_ev_device_found *ev;
5728 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5729 u16 eir_len;
5731 ev = (struct mgmt_ev_device_found *) buf;
5733 memset(buf, 0, sizeof(buf));
5735 bacpy(&ev->addr.bdaddr, bdaddr);
5736 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5737 ev->rssi = rssi;
5739 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5740 name_len);
5742 ev->eir_len = cpu_to_le16(eir_len);
5744 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5747 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5749 struct mgmt_ev_discovering ev;
5750 struct pending_cmd *cmd;
5752 BT_DBG("%s discovering %u", hdev->name, discovering);
5754 if (discovering)
5755 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5756 else
5757 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5759 if (cmd != NULL) {
5760 u8 type = hdev->discovery.type;
5762 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5763 sizeof(type));
5764 mgmt_pending_remove(cmd);
5767 memset(&ev, 0, sizeof(ev));
5768 ev.type = hdev->discovery.type;
5769 ev.discovering = discovering;
5771 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5774 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5776 struct pending_cmd *cmd;
5777 struct mgmt_ev_device_blocked ev;
5779 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5781 bacpy(&ev.addr.bdaddr, bdaddr);
5782 ev.addr.type = type;
5784 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5785 cmd ? cmd->sk : NULL);
5788 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5790 struct pending_cmd *cmd;
5791 struct mgmt_ev_device_unblocked ev;
5793 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5795 bacpy(&ev.addr.bdaddr, bdaddr);
5796 ev.addr.type = type;
5798 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5799 cmd ? cmd->sk : NULL);
5802 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5804 BT_DBG("%s status %u", hdev->name, status);
5806 /* Clear the advertising mgmt setting if we failed to re-enable it */
5807 if (status) {
5808 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5809 new_settings(hdev, NULL);
5813 void mgmt_reenable_advertising(struct hci_dev *hdev)
5815 struct hci_request req;
5817 if (hci_conn_num(hdev, LE_LINK) > 0)
5818 return;
5820 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5821 return;
5823 hci_req_init(&req, hdev);
5824 enable_advertising(&req);
5826 /* If this fails we have no option but to let user space know
5827 * that we've disabled advertising.
5829 if (hci_req_run(&req, adv_enable_complete) < 0) {
5830 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5831 new_settings(hdev, NULL);