/* net/bluetooth/mgmt.c */
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
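
/* A worked example of the conversion above:
 *   mgmt_status(0x00)   -> MGMT_STATUS_SUCCESS        (table index 0)
 *   mgmt_status(0x02)   -> MGMT_STATUS_NOT_CONNECTED  ("No Connection")
 *   mgmt_status(-EBUSY) -> MGMT_STATUS_BUSY           (via mgmt_errno_status)
 *   mgmt_status(0xff)   -> MGMT_STATUS_FAILED         (beyond the table)
 */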
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
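
/* Note on the reply layout built above: num_commands and num_events are
 * little-endian counters followed by one flat __le16 array holding the
 * command opcodes first and then the event opcodes. put_unaligned_le16()
 * is used presumably because rp->opcodes carries no alignment guarantee
 * within the wire-format reply structure.
 */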
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
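
/* A "missing" option is one the controller still needs before it can leave
 * the unconfigured state: a pending external configuration, or a public
 * address that has not been set yet. get_missing_options() checks the same
 * two conditions as is_configured() above, just reported as option bits.
 */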
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
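
/* BR/EDR 1M 1-slot and LE 1M are mandatory PHYs that are always enabled,
 * which is why they are masked out here: only the optional packet types and
 * PHYs may be toggled through MGMT_OP_SET_PHY_CONFIGURATION.
 */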
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
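
/* get_supported_settings() above describes what the hardware could support,
 * while get_current_settings() reflects what is enabled right now; both are
 * returned together (e.g. by read_controller_info() below) so user space
 * can show capability and live state side by side.
 */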
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);

static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
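
/* mgmt_init_hdev() is guarded by the HCI_MGMT flag, so the delayed works
 * above get wired up only once per controller, on the first management
 * command that targets it (the call site lives in the HCI socket code,
 * outside this file).
 */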
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
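
/* set_powered() is a template for most setters in this file: validate the
 * parameter, take hdev->lock, reject if the same command is already pending,
 * short-circuit when nothing would change, then queue a mgmt_pending_cmd
 * together with a *_sync work item and finish in the completion callback.
 */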
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
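
/* High Speed (AMP) support has been removed from the kernel, so
 * MGMT_OP_SET_HS is rejected unconditionally; the stub above only exists so
 * that older user space receives a clean Not Supported status.
 */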
2048 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2050 struct cmd_lookup match = { NULL, hdev };
2051 u8 status = mgmt_status(err);
2053 bt_dev_dbg(hdev, "err %d", err);
2055 if (status) {
2056 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2057 &status);
2058 return;
2061 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2063 new_settings(hdev, match.sk);
2065 if (match.sk)
2066 sock_put(match.sk);
2069 static int set_le_sync(struct hci_dev *hdev, void *data)
2071 struct mgmt_pending_cmd *cmd = data;
2072 struct mgmt_mode *cp = cmd->param;
2073 u8 val = !!cp->val;
2074 int err;
2076 if (!val) {
2077 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2079 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2080 hci_disable_advertising_sync(hdev);
2082 if (ext_adv_capable(hdev))
2083 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2084 } else {
2085 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2088 err = hci_write_le_host_supported_sync(hdev, val, 0);
2090 /* Make sure the controller has a good default for
2091 * advertising data. Restrict the update to when LE
2092 * has actually been enabled. During power on, the
2093 * update in powered_update_hci will take care of it.
2095 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2096 if (ext_adv_capable(hdev)) {
2097 int status;
2099 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2100 if (!status)
2101 hci_update_scan_rsp_data_sync(hdev, 0x00);
2102 } else {
2103 hci_update_adv_data_sync(hdev, 0x00);
2104 hci_update_scan_rsp_data_sync(hdev, 0x00);
2107 hci_update_passive_scan(hdev);
2110 return err;
2113 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2115 struct mgmt_pending_cmd *cmd = data;
2116 u8 status = mgmt_status(err);
2117 struct sock *sk = cmd->sk;
2119 if (status) {
2120 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2121 cmd_status_rsp, &status);
2122 return;
2125 mgmt_pending_remove(cmd);
2126 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
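/* hci_cmd_sync callback for Set Mesh Receiver: toggle the HCI_MESH
 * flag, copy the caller-supplied list of AD types to filter on (an
 * empty or oversized list means all advertisements are forwarded)
 * and re-synchronize passive scanning.
 */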
2129 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2131 struct mgmt_pending_cmd *cmd = data;
2132 struct mgmt_cp_set_mesh *cp = cmd->param;
2133 size_t len = cmd->param_len;
2135 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2137 if (cp->enable)
2138 hci_dev_set_flag(hdev, HCI_MESH);
2139 else
2140 hci_dev_clear_flag(hdev, HCI_MESH);
2142 len -= sizeof(*cp);
2144 /* If filters don't fit, forward all adv pkts */
2145 if (len <= sizeof(hdev->mesh_ad_types))
2146 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2148 hci_update_passive_scan_sync(hdev);
2149 return 0;
2152 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2154 struct mgmt_cp_set_mesh *cp = data;
2155 struct mgmt_pending_cmd *cmd;
2156 int err = 0;
2158 bt_dev_dbg(hdev, "sock %p", sk);
2160 if (!lmp_le_capable(hdev) ||
2161 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2162 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2163 MGMT_STATUS_NOT_SUPPORTED);
2165 if (cp->enable != 0x00 && cp->enable != 0x01)
2166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2167 MGMT_STATUS_INVALID_PARAMS);
2169 hci_dev_lock(hdev);
2171 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2172 if (!cmd)
2173 err = -ENOMEM;
2174 else
2175 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2176 set_mesh_complete);
2178 if (err < 0) {
2179 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2180 MGMT_STATUS_FAILED);
2182 if (cmd)
2183 mgmt_pending_remove(cmd);
2186 hci_dev_unlock(hdev);
2187 return err;
2190 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2192 struct mgmt_mesh_tx *mesh_tx = data;
2193 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2194 unsigned long mesh_send_interval;
2195 u8 mgmt_err = mgmt_status(err);
2197 /* Report any errors here, but don't report completion */
2199 if (mgmt_err) {
2200 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2201 /* Send Complete Error Code for handle */
2202 mesh_send_complete(hdev, mesh_tx, false);
2203 return;
2206 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2207 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2208 mesh_send_interval);
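/* hci_cmd_sync callback for Mesh Send: transmit a mesh packet by
 * creating a short-lived advertising instance (one past the
 * controller's regular instance range) that carries the packet as
 * advertising data, then schedule it for transmission.
 */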
2211 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2213 struct mgmt_mesh_tx *mesh_tx = data;
2214 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2215 struct adv_info *adv, *next_instance;
2216 u8 instance = hdev->le_num_of_adv_sets + 1;
2217 u16 timeout, duration;
2218 int err = 0;
2220 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2221 return MGMT_STATUS_BUSY;
2223 timeout = 1000;
2224 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2225 adv = hci_add_adv_instance(hdev, instance, 0,
2226 send->adv_data_len, send->adv_data,
2227 0, NULL,
2228 timeout, duration,
2229 HCI_ADV_TX_POWER_NO_PREFERENCE,
2230 hdev->le_adv_min_interval,
2231 hdev->le_adv_max_interval,
2232 mesh_tx->handle);
2234 if (!IS_ERR(adv))
2235 mesh_tx->instance = instance;
2236 else
2237 err = PTR_ERR(adv);
2239 if (hdev->cur_adv_instance == instance) {
/* If the currently advertised instance is being changed then
 * cancel the current advertising and schedule the next
 * instance. If there is only one instance then the overridden
 * advertising data will be visible right away.
 */
2245 cancel_adv_timeout(hdev);
2247 next_instance = hci_get_next_instance(hdev, instance);
2248 if (next_instance)
2249 instance = next_instance->instance;
2250 else
2251 instance = 0;
2252 } else if (hdev->adv_instance_timeout) {
/* Immediately advertise the new instance if no other is
 * active, or let it come up naturally from the queue if
 * advertising is already in progress.
 */
2256 instance = 0;
2259 if (instance)
2260 return hci_schedule_adv_instance_sync(hdev, instance, true);
2262 return err;
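/* mgmt_mesh_foreach callback: collect the handles of outstanding mesh
 * transmissions into a Mesh Read Features reply.
 */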
2265 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2267 struct mgmt_rp_mesh_read_features *rp = data;
2269 if (rp->used_handles >= rp->max_handles)
2270 return;
2272 rp->handles[rp->used_handles++] = mesh_tx->handle;
2275 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2276 void *data, u16 len)
2278 struct mgmt_rp_mesh_read_features rp;
2280 if (!lmp_le_capable(hdev) ||
2281 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2283 MGMT_STATUS_NOT_SUPPORTED);
2285 memset(&rp, 0, sizeof(rp));
2286 rp.index = cpu_to_le16(hdev->id);
2287 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2288 rp.max_handles = MESH_HANDLES_MAX;
2290 hci_dev_lock(hdev);
2292 if (rp.max_handles)
2293 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2295 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2296 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2298 hci_dev_unlock(hdev);
2299 return 0;
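/* hci_cmd_sync callback for Mesh Send Cancel: a handle of 0 cancels
 * every outstanding mesh transmission owned by the requesting socket,
 * otherwise only the matching handle is cancelled.
 */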
2302 static int send_cancel(struct hci_dev *hdev, void *data)
2304 struct mgmt_pending_cmd *cmd = data;
2305 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2306 struct mgmt_mesh_tx *mesh_tx;
2308 if (!cancel->handle) {
2309 do {
2310 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2312 if (mesh_tx)
2313 mesh_send_complete(hdev, mesh_tx, false);
2314 } while (mesh_tx);
2315 } else {
2316 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2318 if (mesh_tx && mesh_tx->sk == cmd->sk)
2319 mesh_send_complete(hdev, mesh_tx, false);
2322 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2323 0, NULL, 0);
2324 mgmt_pending_free(cmd);
2326 return 0;
2329 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2330 void *data, u16 len)
2332 struct mgmt_pending_cmd *cmd;
2333 int err;
2335 if (!lmp_le_capable(hdev) ||
2336 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2338 MGMT_STATUS_NOT_SUPPORTED);
2340 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2341 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2342 MGMT_STATUS_REJECTED);
2344 hci_dev_lock(hdev);
2345 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2346 if (!cmd)
2347 err = -ENOMEM;
2348 else
2349 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2351 if (err < 0) {
2352 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2353 MGMT_STATUS_FAILED);
2355 if (cmd)
2356 mgmt_pending_free(cmd);
2359 hci_dev_unlock(hdev);
2360 return err;
2363 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2365 struct mgmt_mesh_tx *mesh_tx;
2366 struct mgmt_cp_mesh_send *send = data;
2367 struct mgmt_rp_mesh_read_features rp;
2368 bool sending;
2369 int err = 0;
2371 if (!lmp_le_capable(hdev) ||
2372 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2373 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2374 MGMT_STATUS_NOT_SUPPORTED);
2375 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2376 len <= MGMT_MESH_SEND_SIZE ||
2377 len > (MGMT_MESH_SEND_SIZE + 31))
2378 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2379 MGMT_STATUS_REJECTED);
2381 hci_dev_lock(hdev);
2383 memset(&rp, 0, sizeof(rp));
2384 rp.max_handles = MESH_HANDLES_MAX;
2386 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2388 if (rp.max_handles <= rp.used_handles) {
2389 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2390 MGMT_STATUS_BUSY);
2391 goto done;
2394 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2395 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2397 if (!mesh_tx)
2398 err = -ENOMEM;
2399 else if (!sending)
2400 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2401 mesh_send_start_complete);
2403 if (err < 0) {
2404 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2405 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2406 MGMT_STATUS_FAILED);
2408 if (mesh_tx) {
2409 if (sending)
2410 mgmt_mesh_remove(mesh_tx);
2412 } else {
2413 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2415 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2416 &mesh_tx->handle, 1);
2419 done:
2420 hci_dev_unlock(hdev);
2421 return err;
2424 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2426 struct mgmt_mode *cp = data;
2427 struct mgmt_pending_cmd *cmd;
2428 int err;
2429 u8 val, enabled;
2431 bt_dev_dbg(hdev, "sock %p", sk);
2433 if (!lmp_le_capable(hdev))
2434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2435 MGMT_STATUS_NOT_SUPPORTED);
2437 if (cp->val != 0x00 && cp->val != 0x01)
2438 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2439 MGMT_STATUS_INVALID_PARAMS);
/* Bluetooth single-mode LE-only controllers, or dual-mode
 * controllers configured as LE-only devices, do not allow
 * switching LE off. These have either LE enabled explicitly
 * or BR/EDR has been previously switched off.
 *
 * When trying to enable an already enabled LE, then gracefully
 * send a positive response. Trying to disable it however will
 * result in rejection.
 */
2450 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2451 if (cp->val == 0x01)
2452 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2455 MGMT_STATUS_REJECTED);
2458 hci_dev_lock(hdev);
2460 val = !!cp->val;
2461 enabled = lmp_host_le_capable(hdev);
2463 if (!hdev_is_powered(hdev) || val == enabled) {
2464 bool changed = false;
2466 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2467 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2468 changed = true;
2471 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2472 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2473 changed = true;
2476 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2477 if (err < 0)
2478 goto unlock;
2480 if (changed)
2481 err = new_settings(hdev, sk);
2483 goto unlock;
2486 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2487 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2488 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2489 MGMT_STATUS_BUSY);
2490 goto unlock;
2493 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2494 if (!cmd)
2495 err = -ENOMEM;
2496 else
2497 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2498 set_le_complete);
2500 if (err < 0) {
2501 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2502 MGMT_STATUS_FAILED);
2504 if (cmd)
2505 mgmt_pending_remove(cmd);
2508 unlock:
2509 hci_dev_unlock(hdev);
2510 return err;
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are and will be, and based on that calculate
 * whether a new HCI command needs to be sent and, if so, with what
 * value.
 */
2519 static bool pending_eir_or_class(struct hci_dev *hdev)
2521 struct mgmt_pending_cmd *cmd;
2523 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2524 switch (cmd->opcode) {
2525 case MGMT_OP_ADD_UUID:
2526 case MGMT_OP_REMOVE_UUID:
2527 case MGMT_OP_SET_DEV_CLASS:
2528 case MGMT_OP_SET_POWERED:
2529 return true;
2533 return false;
2536 static const u8 bluetooth_base_uuid[] = {
2537 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2538 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
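/* UUIDs are stored in little-endian byte order. If the lower 96 bits
 * match the Bluetooth Base UUID, the value in the top four bytes
 * decides whether the UUID has a 16-bit or 32-bit short form;
 * otherwise it only exists as a full 128-bit UUID.
 */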
2541 static u8 get_uuid_size(const u8 *uuid)
2543 u32 val;
2545 if (memcmp(uuid, bluetooth_base_uuid, 12))
2546 return 128;
2548 val = get_unaligned_le32(&uuid[12]);
2549 if (val > 0xffff)
2550 return 32;
2552 return 16;
2555 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2557 struct mgmt_pending_cmd *cmd = data;
2559 bt_dev_dbg(hdev, "err %d", err);
2561 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2562 mgmt_status(err), hdev->dev_class, 3);
2564 mgmt_pending_free(cmd);
2567 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2569 int err;
2571 err = hci_update_class_sync(hdev);
2572 if (err)
2573 return err;
2575 return hci_update_eir_sync(hdev);
2578 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2580 struct mgmt_cp_add_uuid *cp = data;
2581 struct mgmt_pending_cmd *cmd;
2582 struct bt_uuid *uuid;
2583 int err;
2585 bt_dev_dbg(hdev, "sock %p", sk);
2587 hci_dev_lock(hdev);
2589 if (pending_eir_or_class(hdev)) {
2590 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2591 MGMT_STATUS_BUSY);
2592 goto failed;
2595 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2596 if (!uuid) {
2597 err = -ENOMEM;
2598 goto failed;
2601 memcpy(uuid->uuid, cp->uuid, 16);
2602 uuid->svc_hint = cp->svc_hint;
2603 uuid->size = get_uuid_size(cp->uuid);
2605 list_add_tail(&uuid->list, &hdev->uuids);
2607 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2608 if (!cmd) {
2609 err = -ENOMEM;
2610 goto failed;
/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
 */
2616 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2617 mgmt_class_complete);
2618 if (err < 0) {
2619 mgmt_pending_free(cmd);
2620 goto failed;
2623 failed:
2624 hci_dev_unlock(hdev);
2625 return err;
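/* Arm the service cache timer, which batches UUID changes into a
 * single class/EIR update. Returns true only if the timer was newly
 * armed on a powered adapter.
 */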
2628 static bool enable_service_cache(struct hci_dev *hdev)
2630 if (!hdev_is_powered(hdev))
2631 return false;
2633 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2634 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2635 CACHE_TIMEOUT);
2636 return true;
2639 return false;
2642 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2644 int err;
2646 err = hci_update_class_sync(hdev);
2647 if (err)
2648 return err;
2650 return hci_update_eir_sync(hdev);
2653 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2654 u16 len)
2656 struct mgmt_cp_remove_uuid *cp = data;
2657 struct mgmt_pending_cmd *cmd;
2658 struct bt_uuid *match, *tmp;
2659 static const u8 bt_uuid_any[] = {
2660 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2662 int err, found;
2664 bt_dev_dbg(hdev, "sock %p", sk);
2666 hci_dev_lock(hdev);
2668 if (pending_eir_or_class(hdev)) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2670 MGMT_STATUS_BUSY);
2671 goto unlock;
2674 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2675 hci_uuids_clear(hdev);
2677 if (enable_service_cache(hdev)) {
2678 err = mgmt_cmd_complete(sk, hdev->id,
2679 MGMT_OP_REMOVE_UUID,
2680 0, hdev->dev_class, 3);
2681 goto unlock;
2684 goto update_class;
2687 found = 0;
2689 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2690 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2691 continue;
2693 list_del(&match->list);
2694 kfree(match);
2695 found++;
2698 if (found == 0) {
2699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2700 MGMT_STATUS_INVALID_PARAMS);
2701 goto unlock;
2704 update_class:
2705 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2706 if (!cmd) {
2707 err = -ENOMEM;
2708 goto unlock;
/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
 */
2714 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2715 mgmt_class_complete);
2716 if (err < 0)
2717 mgmt_pending_free(cmd);
2719 unlock:
2720 hci_dev_unlock(hdev);
2721 return err;
2724 static int set_class_sync(struct hci_dev *hdev, void *data)
2726 int err = 0;
2728 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2729 cancel_delayed_work_sync(&hdev->service_cache);
2730 err = hci_update_eir_sync(hdev);
2733 if (err)
2734 return err;
2736 return hci_update_class_sync(hdev);
2739 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2740 u16 len)
2742 struct mgmt_cp_set_dev_class *cp = data;
2743 struct mgmt_pending_cmd *cmd;
2744 int err;
2746 bt_dev_dbg(hdev, "sock %p", sk);
2748 if (!lmp_bredr_capable(hdev))
2749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2750 MGMT_STATUS_NOT_SUPPORTED);
2752 hci_dev_lock(hdev);
2754 if (pending_eir_or_class(hdev)) {
2755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2756 MGMT_STATUS_BUSY);
2757 goto unlock;
2760 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2762 MGMT_STATUS_INVALID_PARAMS);
2763 goto unlock;
2766 hdev->major_class = cp->major;
2767 hdev->minor_class = cp->minor;
2769 if (!hdev_is_powered(hdev)) {
2770 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2771 hdev->dev_class, 3);
2772 goto unlock;
2775 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2776 if (!cmd) {
2777 err = -ENOMEM;
2778 goto unlock;
/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
 */
2784 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2785 mgmt_class_complete);
2786 if (err < 0)
2787 mgmt_pending_free(cmd);
2789 unlock:
2790 hci_dev_unlock(hdev);
2791 return err;
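/* Replace the complete set of stored BR/EDR link keys with the list
 * supplied by userspace. Blocked keys and debug combination keys are
 * never loaded, and the HCI_KEEP_DEBUG_KEYS setting is updated from
 * the debug_keys parameter.
 */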
2794 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2795 u16 len)
2797 struct mgmt_cp_load_link_keys *cp = data;
2798 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2799 sizeof(struct mgmt_link_key_info));
2800 u16 key_count, expected_len;
2801 bool changed;
2802 int i;
2804 bt_dev_dbg(hdev, "sock %p", sk);
2806 if (!lmp_bredr_capable(hdev))
2807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2808 MGMT_STATUS_NOT_SUPPORTED);
2810 key_count = __le16_to_cpu(cp->key_count);
2811 if (key_count > max_key_count) {
2812 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2813 key_count);
2814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2815 MGMT_STATUS_INVALID_PARAMS);
2818 expected_len = struct_size(cp, keys, key_count);
2819 if (expected_len != len) {
2820 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2821 expected_len, len);
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2823 MGMT_STATUS_INVALID_PARAMS);
2826 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2828 MGMT_STATUS_INVALID_PARAMS);
2830 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2831 key_count);
2833 for (i = 0; i < key_count; i++) {
2834 struct mgmt_link_key_info *key = &cp->keys[i];
2836 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2837 if (key->type > 0x08)
2838 return mgmt_cmd_status(sk, hdev->id,
2839 MGMT_OP_LOAD_LINK_KEYS,
2840 MGMT_STATUS_INVALID_PARAMS);
2843 hci_dev_lock(hdev);
2845 hci_link_keys_clear(hdev);
2847 if (cp->debug_keys)
2848 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2849 else
2850 changed = hci_dev_test_and_clear_flag(hdev,
2851 HCI_KEEP_DEBUG_KEYS);
2853 if (changed)
2854 new_settings(hdev, NULL);
2856 for (i = 0; i < key_count; i++) {
2857 struct mgmt_link_key_info *key = &cp->keys[i];
2859 if (hci_is_blocked_key(hdev,
2860 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2861 key->val)) {
2862 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2863 &key->addr.bdaddr);
2864 continue;
/* Always ignore debug keys and require a new pairing if
 * the user wants to use them.
 */
2870 if (key->type == HCI_LK_DEBUG_COMBINATION)
2871 continue;
2873 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2874 key->type, key->pin_len, NULL);
2877 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2879 hci_dev_unlock(hdev);
2881 return 0;
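/* Notify all management sockets, except the one that triggered the
 * removal, that a device has been unpaired.
 */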
2884 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2885 u8 addr_type, struct sock *skip_sk)
2887 struct mgmt_ev_device_unpaired ev;
2889 bacpy(&ev.addr.bdaddr, bdaddr);
2890 ev.addr.type = addr_type;
2892 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2893 skip_sk);
2896 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2898 struct mgmt_pending_cmd *cmd = data;
2899 struct mgmt_cp_unpair_device *cp = cmd->param;
2901 if (!err)
2902 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2904 cmd->cmd_complete(cmd, err);
2905 mgmt_pending_free(cmd);
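/* hci_cmd_sync callback for Unpair Device: if the device is still
 * connected, terminate the link; the keys themselves have already
 * been removed by the caller.
 */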
2908 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2910 struct mgmt_pending_cmd *cmd = data;
2911 struct mgmt_cp_unpair_device *cp = cmd->param;
2912 struct hci_conn *conn;
2914 if (cp->addr.type == BDADDR_BREDR)
2915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2916 &cp->addr.bdaddr);
2917 else
2918 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2919 le_addr_type(cp->addr.type));
2921 if (!conn)
2922 return 0;
2924 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2927 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2928 u16 len)
2930 struct mgmt_cp_unpair_device *cp = data;
2931 struct mgmt_rp_unpair_device rp;
2932 struct hci_conn_params *params;
2933 struct mgmt_pending_cmd *cmd;
2934 struct hci_conn *conn;
2935 u8 addr_type;
2936 int err;
2938 memset(&rp, 0, sizeof(rp));
2939 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2940 rp.addr.type = cp->addr.type;
2942 if (!bdaddr_type_is_valid(cp->addr.type))
2943 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2944 MGMT_STATUS_INVALID_PARAMS,
2945 &rp, sizeof(rp));
2947 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2948 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2949 MGMT_STATUS_INVALID_PARAMS,
2950 &rp, sizeof(rp));
2952 hci_dev_lock(hdev);
2954 if (!hdev_is_powered(hdev)) {
2955 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2956 MGMT_STATUS_NOT_POWERED, &rp,
2957 sizeof(rp));
2958 goto unlock;
2961 if (cp->addr.type == BDADDR_BREDR) {
/* If disconnection is requested, then look up the
 * connection. If the remote device is connected, it
 * will be later used to terminate the link.
 *
 * Setting it to NULL explicitly will cause no
 * termination of the link.
 */
2969 if (cp->disconnect)
2970 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2971 &cp->addr.bdaddr);
2972 else
2973 conn = NULL;
2975 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2976 if (err < 0) {
2977 err = mgmt_cmd_complete(sk, hdev->id,
2978 MGMT_OP_UNPAIR_DEVICE,
2979 MGMT_STATUS_NOT_PAIRED, &rp,
2980 sizeof(rp));
2981 goto unlock;
2984 goto done;
2987 /* LE address type */
2988 addr_type = le_addr_type(cp->addr.type);
2990 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2991 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2992 if (err < 0) {
2993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2994 MGMT_STATUS_NOT_PAIRED, &rp,
2995 sizeof(rp));
2996 goto unlock;
2999 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3000 if (!conn) {
3001 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3002 goto done;
/* Defer clearing up the connection parameters until closing to
 * give a chance of keeping them if a re-pairing happens.
 */
3009 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3011 /* Disable auto-connection parameters if present */
3012 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3013 if (params) {
3014 if (params->explicit_connect)
3015 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3016 else
3017 params->auto_connect = HCI_AUTO_CONN_DISABLED;
/* If disconnection is not requested, then clear the connection
 * variable so that the link is not terminated.
 */
3023 if (!cp->disconnect)
3024 conn = NULL;
3026 done:
/* If the connection variable is set, then termination of the
 * link is requested.
 */
3030 if (!conn) {
3031 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3032 &rp, sizeof(rp));
3033 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3034 goto unlock;
3037 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3038 sizeof(*cp));
3039 if (!cmd) {
3040 err = -ENOMEM;
3041 goto unlock;
3044 cmd->cmd_complete = addr_cmd_complete;
3046 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3047 unpair_device_complete);
3048 if (err < 0)
3049 mgmt_pending_free(cmd);
3051 unlock:
3052 hci_dev_unlock(hdev);
3053 return err;
3056 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3057 u16 len)
3059 struct mgmt_cp_disconnect *cp = data;
3060 struct mgmt_rp_disconnect rp;
3061 struct mgmt_pending_cmd *cmd;
3062 struct hci_conn *conn;
3063 int err;
3065 bt_dev_dbg(hdev, "sock %p", sk);
3067 memset(&rp, 0, sizeof(rp));
3068 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3069 rp.addr.type = cp->addr.type;
3071 if (!bdaddr_type_is_valid(cp->addr.type))
3072 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3073 MGMT_STATUS_INVALID_PARAMS,
3074 &rp, sizeof(rp));
3076 hci_dev_lock(hdev);
3078 if (!test_bit(HCI_UP, &hdev->flags)) {
3079 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3080 MGMT_STATUS_NOT_POWERED, &rp,
3081 sizeof(rp));
3082 goto failed;
3085 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3086 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3087 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3088 goto failed;
3091 if (cp->addr.type == BDADDR_BREDR)
3092 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3093 &cp->addr.bdaddr);
3094 else
3095 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3096 le_addr_type(cp->addr.type));
3098 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3099 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3100 MGMT_STATUS_NOT_CONNECTED, &rp,
3101 sizeof(rp));
3102 goto failed;
3105 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3106 if (!cmd) {
3107 err = -ENOMEM;
3108 goto failed;
3111 cmd->cmd_complete = generic_cmd_complete;
3113 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3114 if (err < 0)
3115 mgmt_pending_remove(cmd);
3117 failed:
3118 hci_dev_unlock(hdev);
3119 return err;
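/* Map an HCI link type and address type to the address type used by
 * the management interface.
 */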
3122 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3124 switch (link_type) {
3125 case ISO_LINK:
3126 case LE_LINK:
3127 switch (addr_type) {
3128 case ADDR_LE_DEV_PUBLIC:
3129 return BDADDR_LE_PUBLIC;
3131 default:
3132 /* Fallback to LE Random address type */
3133 return BDADDR_LE_RANDOM;
3136 default:
3137 /* Fallback to BR/EDR type */
3138 return BDADDR_BREDR;
3142 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3143 u16 data_len)
3145 struct mgmt_rp_get_connections *rp;
3146 struct hci_conn *c;
3147 int err;
3148 u16 i;
3150 bt_dev_dbg(hdev, "sock %p", sk);
3152 hci_dev_lock(hdev);
3154 if (!hdev_is_powered(hdev)) {
3155 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3156 MGMT_STATUS_NOT_POWERED);
3157 goto unlock;
3160 i = 0;
3161 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3162 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3163 i++;
3166 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3167 if (!rp) {
3168 err = -ENOMEM;
3169 goto unlock;
3172 i = 0;
3173 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3174 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3175 continue;
3176 bacpy(&rp->addr[i].bdaddr, &c->dst);
3177 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3178 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3179 continue;
3180 i++;
3183 rp->conn_count = cpu_to_le16(i);
3185 /* Recalculate length in case of filtered SCO connections, etc */
3186 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3187 struct_size(rp, addr, i));
3189 kfree(rp);
3191 unlock:
3192 hci_dev_unlock(hdev);
3193 return err;
3196 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3197 struct mgmt_cp_pin_code_neg_reply *cp)
3199 struct mgmt_pending_cmd *cmd;
3200 int err;
3202 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3203 sizeof(*cp));
3204 if (!cmd)
3205 return -ENOMEM;
3207 cmd->cmd_complete = addr_cmd_complete;
3209 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3210 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3211 if (err < 0)
3212 mgmt_pending_remove(cmd);
3214 return err;
3217 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3218 u16 len)
3220 struct hci_conn *conn;
3221 struct mgmt_cp_pin_code_reply *cp = data;
3222 struct hci_cp_pin_code_reply reply;
3223 struct mgmt_pending_cmd *cmd;
3224 int err;
3226 bt_dev_dbg(hdev, "sock %p", sk);
3228 hci_dev_lock(hdev);
3230 if (!hdev_is_powered(hdev)) {
3231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3232 MGMT_STATUS_NOT_POWERED);
3233 goto failed;
3236 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3237 if (!conn) {
3238 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3239 MGMT_STATUS_NOT_CONNECTED);
3240 goto failed;
3243 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3244 struct mgmt_cp_pin_code_neg_reply ncp;
3246 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3248 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3250 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3251 if (err >= 0)
3252 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3253 MGMT_STATUS_INVALID_PARAMS);
3255 goto failed;
3258 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3259 if (!cmd) {
3260 err = -ENOMEM;
3261 goto failed;
3264 cmd->cmd_complete = addr_cmd_complete;
3266 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3267 reply.pin_len = cp->pin_len;
3268 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3270 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3271 if (err < 0)
3272 mgmt_pending_remove(cmd);
3274 failed:
3275 hci_dev_unlock(hdev);
3276 return err;
3279 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3280 u16 len)
3282 struct mgmt_cp_set_io_capability *cp = data;
3284 bt_dev_dbg(hdev, "sock %p", sk);
3286 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3288 MGMT_STATUS_INVALID_PARAMS);
3290 hci_dev_lock(hdev);
3292 hdev->io_capability = cp->io_capability;
3294 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3296 hci_dev_unlock(hdev);
3298 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3299 NULL, 0);
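/* Find the pending Pair Device command, if any, that is associated
 * with the given connection.
 */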
3302 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3304 struct hci_dev *hdev = conn->hdev;
3305 struct mgmt_pending_cmd *cmd;
3307 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3308 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3309 continue;
3311 if (cmd->user_data != conn)
3312 continue;
3314 return cmd;
3317 return NULL;
3320 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3322 struct mgmt_rp_pair_device rp;
3323 struct hci_conn *conn = cmd->user_data;
3324 int err;
3326 bacpy(&rp.addr.bdaddr, &conn->dst);
3327 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3329 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3330 status, &rp, sizeof(rp));
3332 /* So we don't get further callbacks for this connection */
3333 conn->connect_cfm_cb = NULL;
3334 conn->security_cfm_cb = NULL;
3335 conn->disconn_cfm_cb = NULL;
3337 hci_conn_drop(conn);
/* The device is paired so there is no need to remove
 * its connection parameters anymore.
 */
3342 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3344 hci_conn_put(conn);
3346 return err;
3349 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3351 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3352 struct mgmt_pending_cmd *cmd;
3354 cmd = find_pairing(conn);
3355 if (cmd) {
3356 cmd->cmd_complete(cmd, status);
3357 mgmt_pending_remove(cmd);
3361 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3363 struct mgmt_pending_cmd *cmd;
3365 BT_DBG("status %u", status);
3367 cmd = find_pairing(conn);
3368 if (!cmd) {
3369 BT_DBG("Unable to find a pending command");
3370 return;
3373 cmd->cmd_complete(cmd, mgmt_status(status));
3374 mgmt_pending_remove(cmd);
3377 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3379 struct mgmt_pending_cmd *cmd;
3381 BT_DBG("status %u", status);
3383 if (!status)
3384 return;
3386 cmd = find_pairing(conn);
3387 if (!cmd) {
3388 BT_DBG("Unable to find a pending command");
3389 return;
3392 cmd->cmd_complete(cmd, mgmt_status(status));
3393 mgmt_pending_remove(cmd);
3396 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3397 u16 len)
3399 struct mgmt_cp_pair_device *cp = data;
3400 struct mgmt_rp_pair_device rp;
3401 struct mgmt_pending_cmd *cmd;
3402 u8 sec_level, auth_type;
3403 struct hci_conn *conn;
3404 int err;
3406 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
3409 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3410 rp.addr.type = cp->addr.type;
3412 if (!bdaddr_type_is_valid(cp->addr.type))
3413 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3414 MGMT_STATUS_INVALID_PARAMS,
3415 &rp, sizeof(rp));
3417 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3418 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3419 MGMT_STATUS_INVALID_PARAMS,
3420 &rp, sizeof(rp));
3422 hci_dev_lock(hdev);
3424 if (!hdev_is_powered(hdev)) {
3425 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3426 MGMT_STATUS_NOT_POWERED, &rp,
3427 sizeof(rp));
3428 goto unlock;
3431 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3432 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3433 MGMT_STATUS_ALREADY_PAIRED, &rp,
3434 sizeof(rp));
3435 goto unlock;
3438 sec_level = BT_SECURITY_MEDIUM;
3439 auth_type = HCI_AT_DEDICATED_BONDING;
3441 if (cp->addr.type == BDADDR_BREDR) {
3442 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3443 auth_type, CONN_REASON_PAIR_DEVICE,
3444 HCI_ACL_CONN_TIMEOUT);
3445 } else {
3446 u8 addr_type = le_addr_type(cp->addr.type);
3447 struct hci_conn_params *p;
/* When pairing a new device, it is expected to remember
 * this device for future connections. Adding the connection
 * parameter information ahead of time allows tracking
 * of the peripheral preferred values and will speed up any
 * further connection establishment.
 *
 * If connection parameters already exist, then they
 * will be kept and this function does nothing.
 */
3458 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3460 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3461 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3463 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3464 sec_level, HCI_LE_CONN_TIMEOUT,
3465 CONN_REASON_PAIR_DEVICE);
3468 if (IS_ERR(conn)) {
3469 int status;
3471 if (PTR_ERR(conn) == -EBUSY)
3472 status = MGMT_STATUS_BUSY;
3473 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3474 status = MGMT_STATUS_NOT_SUPPORTED;
3475 else if (PTR_ERR(conn) == -ECONNREFUSED)
3476 status = MGMT_STATUS_REJECTED;
3477 else
3478 status = MGMT_STATUS_CONNECT_FAILED;
3480 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 status, &rp, sizeof(rp));
3482 goto unlock;
3485 if (conn->connect_cfm_cb) {
3486 hci_conn_drop(conn);
3487 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3488 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3489 goto unlock;
3492 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3493 if (!cmd) {
3494 err = -ENOMEM;
3495 hci_conn_drop(conn);
3496 goto unlock;
3499 cmd->cmd_complete = pairing_complete;
3501 /* For LE, just connecting isn't a proof that the pairing finished */
3502 if (cp->addr.type == BDADDR_BREDR) {
3503 conn->connect_cfm_cb = pairing_complete_cb;
3504 conn->security_cfm_cb = pairing_complete_cb;
3505 conn->disconn_cfm_cb = pairing_complete_cb;
3506 } else {
3507 conn->connect_cfm_cb = le_pairing_complete_cb;
3508 conn->security_cfm_cb = le_pairing_complete_cb;
3509 conn->disconn_cfm_cb = le_pairing_complete_cb;
3512 conn->io_capability = cp->io_cap;
3513 cmd->user_data = hci_conn_get(conn);
3515 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3516 hci_conn_security(conn, sec_level, auth_type, true)) {
3517 cmd->cmd_complete(cmd, 0);
3518 mgmt_pending_remove(cmd);
3521 err = 0;
3523 unlock:
3524 hci_dev_unlock(hdev);
3525 return err;
3528 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3529 u16 len)
3531 struct mgmt_addr_info *addr = data;
3532 struct mgmt_pending_cmd *cmd;
3533 struct hci_conn *conn;
3534 int err;
3536 bt_dev_dbg(hdev, "sock %p", sk);
3538 hci_dev_lock(hdev);
3540 if (!hdev_is_powered(hdev)) {
3541 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3542 MGMT_STATUS_NOT_POWERED);
3543 goto unlock;
3546 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3547 if (!cmd) {
3548 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3549 MGMT_STATUS_INVALID_PARAMS);
3550 goto unlock;
3553 conn = cmd->user_data;
3555 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3556 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3557 MGMT_STATUS_INVALID_PARAMS);
3558 goto unlock;
3561 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3562 mgmt_pending_remove(cmd);
3564 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3565 addr, sizeof(*addr));
/* Since the user doesn't want to proceed with the connection, abort
 * any ongoing pairing and then terminate the link if it was created
 * because of the pair device action.
 */
3571 if (addr->type == BDADDR_BREDR)
3572 hci_remove_link_key(hdev, &addr->bdaddr);
3573 else
3574 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3575 le_addr_type(addr->type));
3577 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3578 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3580 unlock:
3581 hci_dev_unlock(hdev);
3582 return err;
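/* Common handler for all user confirmation and passkey responses. For
 * LE the response is passed to SMP directly, while for BR/EDR the
 * corresponding HCI reply command is sent to the controller.
 */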
3585 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3586 struct mgmt_addr_info *addr, u16 mgmt_op,
3587 u16 hci_op, __le32 passkey)
3589 struct mgmt_pending_cmd *cmd;
3590 struct hci_conn *conn;
3591 int err;
3593 hci_dev_lock(hdev);
3595 if (!hdev_is_powered(hdev)) {
3596 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3597 MGMT_STATUS_NOT_POWERED, addr,
3598 sizeof(*addr));
3599 goto done;
3602 if (addr->type == BDADDR_BREDR)
3603 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3604 else
3605 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3606 le_addr_type(addr->type));
3608 if (!conn) {
3609 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3610 MGMT_STATUS_NOT_CONNECTED, addr,
3611 sizeof(*addr));
3612 goto done;
3615 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3616 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3617 if (!err)
3618 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3619 MGMT_STATUS_SUCCESS, addr,
3620 sizeof(*addr));
3621 else
3622 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3623 MGMT_STATUS_FAILED, addr,
3624 sizeof(*addr));
3626 goto done;
3629 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3630 if (!cmd) {
3631 err = -ENOMEM;
3632 goto done;
3635 cmd->cmd_complete = addr_cmd_complete;
3637 /* Continue with pairing via HCI */
3638 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3639 struct hci_cp_user_passkey_reply cp;
3641 bacpy(&cp.bdaddr, &addr->bdaddr);
3642 cp.passkey = passkey;
3643 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3644 } else
3645 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3646 &addr->bdaddr);
3648 if (err < 0)
3649 mgmt_pending_remove(cmd);
3651 done:
3652 hci_dev_unlock(hdev);
3653 return err;
3656 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3657 void *data, u16 len)
3659 struct mgmt_cp_pin_code_neg_reply *cp = data;
3661 bt_dev_dbg(hdev, "sock %p", sk);
3663 return user_pairing_resp(sk, hdev, &cp->addr,
3664 MGMT_OP_PIN_CODE_NEG_REPLY,
3665 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3668 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3669 u16 len)
3671 struct mgmt_cp_user_confirm_reply *cp = data;
3673 bt_dev_dbg(hdev, "sock %p", sk);
3675 if (len != sizeof(*cp))
3676 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3677 MGMT_STATUS_INVALID_PARAMS);
3679 return user_pairing_resp(sk, hdev, &cp->addr,
3680 MGMT_OP_USER_CONFIRM_REPLY,
3681 HCI_OP_USER_CONFIRM_REPLY, 0);
3684 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3685 void *data, u16 len)
3687 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3689 bt_dev_dbg(hdev, "sock %p", sk);
3691 return user_pairing_resp(sk, hdev, &cp->addr,
3692 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3693 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3696 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3697 u16 len)
3699 struct mgmt_cp_user_passkey_reply *cp = data;
3701 bt_dev_dbg(hdev, "sock %p", sk);
3703 return user_pairing_resp(sk, hdev, &cp->addr,
3704 MGMT_OP_USER_PASSKEY_REPLY,
3705 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3708 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3709 void *data, u16 len)
3711 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3713 bt_dev_dbg(hdev, "sock %p", sk);
3715 return user_pairing_resp(sk, hdev, &cp->addr,
3716 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3717 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
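/* If the current advertising instance advertises any of the given
 * flags (e.g. the local name or appearance), expire it and schedule
 * the next instance so that the updated data is put on air.
 */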
3720 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3722 struct adv_info *adv_instance;
3724 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3725 if (!adv_instance)
3726 return 0;
3728 /* stop if current instance doesn't need to be changed */
3729 if (!(adv_instance->flags & flags))
3730 return 0;
3732 cancel_adv_timeout(hdev);
3734 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3735 if (!adv_instance)
3736 return 0;
3738 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3740 return 0;
3743 static int name_changed_sync(struct hci_dev *hdev, void *data)
3745 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3748 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3750 struct mgmt_pending_cmd *cmd = data;
3751 struct mgmt_cp_set_local_name *cp = cmd->param;
3752 u8 status = mgmt_status(err);
3754 bt_dev_dbg(hdev, "err %d", err);
3756 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3757 return;
3759 if (status) {
3760 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3761 status);
3762 } else {
3763 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3764 cp, sizeof(*cp));
3766 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3767 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3770 mgmt_pending_remove(cmd);
3773 static int set_name_sync(struct hci_dev *hdev, void *data)
3775 if (lmp_bredr_capable(hdev)) {
3776 hci_update_name_sync(hdev);
3777 hci_update_eir_sync(hdev);
/* The name is stored in the scan response data, so there is
 * no need to update the advertising data here.
 */
3783 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3784 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3786 return 0;
3789 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3790 u16 len)
3792 struct mgmt_cp_set_local_name *cp = data;
3793 struct mgmt_pending_cmd *cmd;
3794 int err;
3796 bt_dev_dbg(hdev, "sock %p", sk);
3798 hci_dev_lock(hdev);
/* If the old values are the same as the new ones just return a
 * direct command complete event.
 */
3803 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3804 !memcmp(hdev->short_name, cp->short_name,
3805 sizeof(hdev->short_name))) {
3806 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3807 data, len);
3808 goto failed;
3811 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3813 if (!hdev_is_powered(hdev)) {
3814 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3817 data, len);
3818 if (err < 0)
3819 goto failed;
3821 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3822 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3823 ext_info_changed(hdev, sk);
3825 goto failed;
3828 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3829 if (!cmd)
3830 err = -ENOMEM;
3831 else
3832 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3833 set_name_complete);
3835 if (err < 0) {
3836 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3837 MGMT_STATUS_FAILED);
3839 if (cmd)
3840 mgmt_pending_remove(cmd);
3842 goto failed;
3845 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3847 failed:
3848 hci_dev_unlock(hdev);
3849 return err;
3852 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3854 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3857 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3858 u16 len)
3860 struct mgmt_cp_set_appearance *cp = data;
3861 u16 appearance;
3862 int err;
3864 bt_dev_dbg(hdev, "sock %p", sk);
3866 if (!lmp_le_capable(hdev))
3867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3868 MGMT_STATUS_NOT_SUPPORTED);
3870 appearance = le16_to_cpu(cp->appearance);
3872 hci_dev_lock(hdev);
3874 if (hdev->appearance != appearance) {
3875 hdev->appearance = appearance;
3877 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3878 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3879 NULL);
3881 ext_info_changed(hdev, sk);
3884 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3887 hci_dev_unlock(hdev);
3889 return err;
3892 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3893 void *data, u16 len)
3895 struct mgmt_rp_get_phy_configuration rp;
3897 bt_dev_dbg(hdev, "sock %p", sk);
3899 hci_dev_lock(hdev);
3901 memset(&rp, 0, sizeof(rp));
3903 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3904 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3905 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3907 hci_dev_unlock(hdev);
3909 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3910 &rp, sizeof(rp));
3913 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3915 struct mgmt_ev_phy_configuration_changed ev;
3917 memset(&ev, 0, sizeof(ev));
3919 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3921 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3922 sizeof(ev), skip);
3925 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3927 struct mgmt_pending_cmd *cmd = data;
3928 struct sk_buff *skb = cmd->skb;
3929 u8 status = mgmt_status(err);
3931 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3932 return;
3934 if (!status) {
3935 if (!skb)
3936 status = MGMT_STATUS_FAILED;
3937 else if (IS_ERR(skb))
3938 status = mgmt_status(PTR_ERR(skb));
3939 else
3940 status = mgmt_status(skb->data[0]);
3943 bt_dev_dbg(hdev, "status %d", status);
3945 if (status) {
3946 mgmt_cmd_status(cmd->sk, hdev->id,
3947 MGMT_OP_SET_PHY_CONFIGURATION, status);
3948 } else {
3949 mgmt_cmd_complete(cmd->sk, hdev->id,
3950 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3951 NULL, 0);
3953 mgmt_phy_configuration_changed(hdev, cmd->sk);
3956 if (skb && !IS_ERR(skb))
3957 kfree_skb(skb);
3959 mgmt_pending_remove(cmd);
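/* hci_cmd_sync callback for Set PHY Configuration: translate the
 * selected mgmt PHY bits into an LE Set Default PHY command. The
 * all_phys bits tell the controller that the host has no TX and/or
 * RX preference when no LE PHYs were selected for that direction.
 */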
3962 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3964 struct mgmt_pending_cmd *cmd = data;
3965 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3966 struct hci_cp_le_set_default_phy cp_phy;
3967 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3969 memset(&cp_phy, 0, sizeof(cp_phy));
3971 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3972 cp_phy.all_phys |= 0x01;
3974 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3975 cp_phy.all_phys |= 0x02;
3977 if (selected_phys & MGMT_PHY_LE_1M_TX)
3978 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3980 if (selected_phys & MGMT_PHY_LE_2M_TX)
3981 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3983 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3984 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3986 if (selected_phys & MGMT_PHY_LE_1M_RX)
3987 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3989 if (selected_phys & MGMT_PHY_LE_2M_RX)
3990 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3992 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3993 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3995 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3996 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3998 return 0;
4001 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4002 void *data, u16 len)
4004 struct mgmt_cp_set_phy_configuration *cp = data;
4005 struct mgmt_pending_cmd *cmd;
4006 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4007 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4008 bool changed = false;
4009 int err;
4011 bt_dev_dbg(hdev, "sock %p", sk);
4013 configurable_phys = get_configurable_phys(hdev);
4014 supported_phys = get_supported_phys(hdev);
4015 selected_phys = __le32_to_cpu(cp->selected_phys);
4017 if (selected_phys & ~supported_phys)
4018 return mgmt_cmd_status(sk, hdev->id,
4019 MGMT_OP_SET_PHY_CONFIGURATION,
4020 MGMT_STATUS_INVALID_PARAMS);
4022 unconfigure_phys = supported_phys & ~configurable_phys;
4024 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4025 return mgmt_cmd_status(sk, hdev->id,
4026 MGMT_OP_SET_PHY_CONFIGURATION,
4027 MGMT_STATUS_INVALID_PARAMS);
4029 if (selected_phys == get_selected_phys(hdev))
4030 return mgmt_cmd_complete(sk, hdev->id,
4031 MGMT_OP_SET_PHY_CONFIGURATION,
4032 0, NULL, 0);
4034 hci_dev_lock(hdev);
4036 if (!hdev_is_powered(hdev)) {
4037 err = mgmt_cmd_status(sk, hdev->id,
4038 MGMT_OP_SET_PHY_CONFIGURATION,
4039 MGMT_STATUS_REJECTED);
4040 goto unlock;
4043 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4044 err = mgmt_cmd_status(sk, hdev->id,
4045 MGMT_OP_SET_PHY_CONFIGURATION,
4046 MGMT_STATUS_BUSY);
4047 goto unlock;
4050 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4051 pkt_type |= (HCI_DH3 | HCI_DM3);
4052 else
4053 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4055 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4056 pkt_type |= (HCI_DH5 | HCI_DM5);
4057 else
4058 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4060 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4061 pkt_type &= ~HCI_2DH1;
4062 else
4063 pkt_type |= HCI_2DH1;
4065 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4066 pkt_type &= ~HCI_2DH3;
4067 else
4068 pkt_type |= HCI_2DH3;
4070 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4071 pkt_type &= ~HCI_2DH5;
4072 else
4073 pkt_type |= HCI_2DH5;
4075 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4076 pkt_type &= ~HCI_3DH1;
4077 else
4078 pkt_type |= HCI_3DH1;
4080 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4081 pkt_type &= ~HCI_3DH3;
4082 else
4083 pkt_type |= HCI_3DH3;
4085 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4086 pkt_type &= ~HCI_3DH5;
4087 else
4088 pkt_type |= HCI_3DH5;
4090 if (pkt_type != hdev->pkt_type) {
4091 hdev->pkt_type = pkt_type;
4092 changed = true;
4095 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4096 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4097 if (changed)
4098 mgmt_phy_configuration_changed(hdev, sk);
4100 err = mgmt_cmd_complete(sk, hdev->id,
4101 MGMT_OP_SET_PHY_CONFIGURATION,
4102 0, NULL, 0);
4104 goto unlock;
4107 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4108 len);
4109 if (!cmd)
4110 err = -ENOMEM;
4111 else
4112 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4113 set_default_phy_complete);
4115 if (err < 0) {
4116 err = mgmt_cmd_status(sk, hdev->id,
4117 MGMT_OP_SET_PHY_CONFIGURATION,
4118 MGMT_STATUS_FAILED);
4120 if (cmd)
4121 mgmt_pending_remove(cmd);
4124 unlock:
4125 hci_dev_unlock(hdev);
4127 return err;
4130 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4131 u16 len)
4133 int err = MGMT_STATUS_SUCCESS;
4134 struct mgmt_cp_set_blocked_keys *keys = data;
4135 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4136 sizeof(struct mgmt_blocked_key_info));
4137 u16 key_count, expected_len;
4138 int i;
4140 bt_dev_dbg(hdev, "sock %p", sk);
4142 key_count = __le16_to_cpu(keys->key_count);
4143 if (key_count > max_key_count) {
4144 bt_dev_err(hdev, "too big key_count value %u", key_count);
4145 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4146 MGMT_STATUS_INVALID_PARAMS);
4149 expected_len = struct_size(keys, keys, key_count);
4150 if (expected_len != len) {
4151 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4152 expected_len, len);
4153 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4154 MGMT_STATUS_INVALID_PARAMS);
4157 hci_dev_lock(hdev);
4159 hci_blocked_keys_clear(hdev);
4161 for (i = 0; i < key_count; ++i) {
4162 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4164 if (!b) {
4165 err = MGMT_STATUS_NO_RESOURCES;
4166 break;
4169 b->type = keys->keys[i].type;
4170 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4171 list_add_rcu(&b->list, &hdev->blocked_keys);
4173 hci_dev_unlock(hdev);
4175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4176 err, NULL, 0);
4179 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4180 void *data, u16 len)
4182 struct mgmt_mode *cp = data;
4183 int err;
4184 bool changed = false;
4186 bt_dev_dbg(hdev, "sock %p", sk);
4188 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4189 return mgmt_cmd_status(sk, hdev->id,
4190 MGMT_OP_SET_WIDEBAND_SPEECH,
4191 MGMT_STATUS_NOT_SUPPORTED);
4193 if (cp->val != 0x00 && cp->val != 0x01)
4194 return mgmt_cmd_status(sk, hdev->id,
4195 MGMT_OP_SET_WIDEBAND_SPEECH,
4196 MGMT_STATUS_INVALID_PARAMS);
4198 hci_dev_lock(hdev);
4200 if (hdev_is_powered(hdev) &&
4201 !!cp->val != hci_dev_test_flag(hdev,
4202 HCI_WIDEBAND_SPEECH_ENABLED)) {
4203 err = mgmt_cmd_status(sk, hdev->id,
4204 MGMT_OP_SET_WIDEBAND_SPEECH,
4205 MGMT_STATUS_REJECTED);
4206 goto unlock;
4209 if (cp->val)
4210 changed = !hci_dev_test_and_set_flag(hdev,
4211 HCI_WIDEBAND_SPEECH_ENABLED);
4212 else
4213 changed = hci_dev_test_and_clear_flag(hdev,
4214 HCI_WIDEBAND_SPEECH_ENABLED);
4216 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4217 if (err < 0)
4218 goto unlock;
4220 if (changed)
4221 err = new_settings(hdev, sk);
4223 unlock:
4224 hci_dev_unlock(hdev);
4225 return err;
4228 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4229 void *data, u16 data_len)
4231 char buf[20];
4232 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4233 u16 cap_len = 0;
4234 u8 flags = 0;
4235 u8 tx_power_range[2];
4237 bt_dev_dbg(hdev, "sock %p", sk);
4239 memset(&buf, 0, sizeof(buf));
4241 hci_dev_lock(hdev);
/* When the Read Simple Pairing Options command is supported, then
 * the remote public key validation is supported.
 *
 * Alternatively, when Microsoft extensions are available, they can
 * indicate support for public key validation as well.
 */
4249 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4250 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4252 flags |= 0x02; /* Remote public key validation (LE) */
/* When the Read Encryption Key Size command is supported, then the
 * encryption key size is enforced.
 */
4257 if (hdev->commands[20] & 0x10)
4258 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4260 flags |= 0x08; /* Encryption key size enforcement (LE) */
4262 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4263 &flags, 1);
/* When the Read Simple Pairing Options command is supported, then
 * the maximum encryption key size information is provided as well.
 */
4268 if (hdev->commands[41] & 0x08)
4269 cap_len = eir_append_le16(rp->cap, cap_len,
4270 MGMT_CAP_MAX_ENC_KEY_SIZE,
4271 hdev->max_enc_key_size);
4273 cap_len = eir_append_le16(rp->cap, cap_len,
4274 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4275 SMP_MAX_ENC_KEY_SIZE);
/* Append the min/max LE tx power parameters if we were able to
 * fetch them from the controller.
 */
4280 if (hdev->commands[38] & 0x80) {
4281 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4282 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4283 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4284 tx_power_range, 2);
4287 rp->cap_len = cpu_to_le16(cap_len);
4289 hci_dev_unlock(hdev);
4291 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4292 rp, sizeof(*rp) + cap_len);
4295 #ifdef CONFIG_BT_FEATURE_DEBUG
4296 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4297 static const u8 debug_uuid[16] = {
4298 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4299 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4301 #endif
4303 /* 330859bc-7506-492d-9370-9a6f0614037f */
4304 static const u8 quality_report_uuid[16] = {
4305 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4306 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4309 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4310 static const u8 offload_codecs_uuid[16] = {
4311 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4312 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4313 };
4315 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4316 static const u8 le_simultaneous_roles_uuid[16] = {
4317 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4318 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4319 };
4321 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4322 static const u8 rpa_resolution_uuid[16] = {
4323 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4324 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4325 };
4327 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4328 static const u8 iso_socket_uuid[16] = {
4329 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4330 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4331 };
4333 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4334 static const u8 mgmt_mesh_uuid[16] = {
4335 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4336 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4337 };
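/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO. Reports one entry per
 * experimental feature UUID with a flags word indicating whether the
 * feature is currently enabled. Called with hdev == NULL on the
 * non-controller index, where only index-independent features such as
 * debug and ISO sockets are reported.
 */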
4339 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4340 void *data, u16 data_len)
4342 struct mgmt_rp_read_exp_features_info *rp;
4343 size_t len;
4344 u16 idx = 0;
4345 u32 flags;
4346 int status;
4348 bt_dev_dbg(hdev, "sock %p", sk);
4350 /* Enough space for 7 features */
4351 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4352 rp = kzalloc(len, GFP_KERNEL);
4353 if (!rp)
4354 return -ENOMEM;
4356 #ifdef CONFIG_BT_FEATURE_DEBUG
4357 if (!hdev) {
4358 flags = bt_dbg_get() ? BIT(0) : 0;
4360 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4361 rp->features[idx].flags = cpu_to_le32(flags);
4362 idx++;
4364 #endif
4366 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4367 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4368 flags = BIT(0);
4369 else
4370 flags = 0;
4372 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4373 rp->features[idx].flags = cpu_to_le32(flags);
4374 idx++;
4377 if (hdev && ll_privacy_capable(hdev)) {
4378 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4379 flags = BIT(0) | BIT(1);
4380 else
4381 flags = BIT(1);
4383 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4384 rp->features[idx].flags = cpu_to_le32(flags);
4385 idx++;
4388 if (hdev && (aosp_has_quality_report(hdev) ||
4389 hdev->set_quality_report)) {
4390 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4391 flags = BIT(0);
4392 else
4393 flags = 0;
4395 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4396 rp->features[idx].flags = cpu_to_le32(flags);
4397 idx++;
4400 if (hdev && hdev->get_data_path_id) {
4401 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4402 flags = BIT(0);
4403 else
4404 flags = 0;
4406 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4407 rp->features[idx].flags = cpu_to_le32(flags);
4408 idx++;
4411 if (IS_ENABLED(CONFIG_BT_LE)) {
4412 flags = iso_enabled() ? BIT(0) : 0;
4413 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4414 rp->features[idx].flags = cpu_to_le32(flags);
4415 idx++;
4418 if (hdev && lmp_le_capable(hdev)) {
4419 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4420 flags = BIT(0);
4421 else
4422 flags = 0;
4424 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4425 rp->features[idx].flags = cpu_to_le32(flags);
4426 idx++;
4429 rp->feature_count = cpu_to_le16(idx);
4431 /* After reading the experimental features information, enable
4432 * the events to update the client on any future change.
4433 */
4434 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4436 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4437 MGMT_OP_READ_EXP_FEATURES_INFO,
4438 0, rp, sizeof(*rp) + (20 * idx));
4440 kfree(rp);
4441 return status;
4444 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4445 struct sock *skip)
4447 struct mgmt_ev_exp_feature_changed ev;
4449 memset(&ev, 0, sizeof(ev));
4450 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4451 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4453 // Do we need to be atomic with the conn_flags?
4454 if (enabled && privacy_mode_capable(hdev))
4455 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4456 else
4457 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4459 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4460 &ev, sizeof(ev),
4461 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4465 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4466 bool enabled, struct sock *skip)
4468 struct mgmt_ev_exp_feature_changed ev;
4470 memset(&ev, 0, sizeof(ev));
4471 memcpy(ev.uuid, uuid, 16);
4472 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4474 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4475 &ev, sizeof(ev),
4476 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4479 #define EXP_FEAT(_uuid, _set_func) \
4480 { \
4481 .uuid = _uuid, \
4482 .set_func = _set_func, \
4483 }
4485 /* The zero key uuid is special. Multiple exp features are set through it. */
4486 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4487 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4489 struct mgmt_rp_set_exp_feature rp;
4491 memset(rp.uuid, 0, 16);
4492 rp.flags = cpu_to_le32(0);
4494 #ifdef CONFIG_BT_FEATURE_DEBUG
4495 if (!hdev) {
4496 bool changed = bt_dbg_get();
4498 bt_dbg_set(false);
4500 if (changed)
4501 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4503 #endif
4505 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4506 bool changed;
4508 changed = hci_dev_test_and_clear_flag(hdev,
4509 HCI_ENABLE_LL_PRIVACY);
4510 if (changed)
4511 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4512 sk);
4515 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4517 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4518 MGMT_OP_SET_EXP_FEATURE, 0,
4519 &rp, sizeof(rp));
4522 #ifdef CONFIG_BT_FEATURE_DEBUG
4523 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4524 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4526 struct mgmt_rp_set_exp_feature rp;
4528 bool val, changed;
4529 int err;
4531 /* Command requires the non-controller index */
4532 if (hdev)
4533 return mgmt_cmd_status(sk, hdev->id,
4534 MGMT_OP_SET_EXP_FEATURE,
4535 MGMT_STATUS_INVALID_INDEX);
4537 /* Parameters are limited to a single octet */
4538 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4539 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4540 MGMT_OP_SET_EXP_FEATURE,
4541 MGMT_STATUS_INVALID_PARAMS);
4543 /* Only boolean on/off is supported */
4544 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4545 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4546 MGMT_OP_SET_EXP_FEATURE,
4547 MGMT_STATUS_INVALID_PARAMS);
4549 val = !!cp->param[0];
4550 changed = val ? !bt_dbg_get() : bt_dbg_get();
4551 bt_dbg_set(val);
4553 memcpy(rp.uuid, debug_uuid, 16);
4554 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4556 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4558 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4559 MGMT_OP_SET_EXP_FEATURE, 0,
4560 &rp, sizeof(rp));
4562 if (changed)
4563 exp_feature_changed(hdev, debug_uuid, val, sk);
4565 return err;
4567 #endif
4569 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4570 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4572 struct mgmt_rp_set_exp_feature rp;
4573 bool val, changed;
4574 int err;
4576 /* Command requires the controller index */
4577 if (!hdev)
4578 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4579 MGMT_OP_SET_EXP_FEATURE,
4580 MGMT_STATUS_INVALID_INDEX);
4582 /* Parameters are limited to a single octet */
4583 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4584 return mgmt_cmd_status(sk, hdev->id,
4585 MGMT_OP_SET_EXP_FEATURE,
4586 MGMT_STATUS_INVALID_PARAMS);
4588 /* Only boolean on/off is supported */
4589 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4590 return mgmt_cmd_status(sk, hdev->id,
4591 MGMT_OP_SET_EXP_FEATURE,
4592 MGMT_STATUS_INVALID_PARAMS);
4594 val = !!cp->param[0];
4596 if (val) {
4597 changed = !hci_dev_test_and_set_flag(hdev,
4598 HCI_MESH_EXPERIMENTAL);
4599 } else {
4600 hci_dev_clear_flag(hdev, HCI_MESH);
4601 changed = hci_dev_test_and_clear_flag(hdev,
4602 HCI_MESH_EXPERIMENTAL);
4605 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4606 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4608 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4610 err = mgmt_cmd_complete(sk, hdev->id,
4611 MGMT_OP_SET_EXP_FEATURE, 0,
4612 &rp, sizeof(rp));
4614 if (changed)
4615 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4617 return err;
4620 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4621 struct mgmt_cp_set_exp_feature *cp,
4622 u16 data_len)
4624 struct mgmt_rp_set_exp_feature rp;
4625 bool val, changed;
4626 int err;
4627 u32 flags;
4629 /* Command requires the controller index */
4630 if (!hdev)
4631 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4632 MGMT_OP_SET_EXP_FEATURE,
4633 MGMT_STATUS_INVALID_INDEX);
4635 /* Changes can only be made when the controller is powered down */
4636 if (hdev_is_powered(hdev))
4637 return mgmt_cmd_status(sk, hdev->id,
4638 MGMT_OP_SET_EXP_FEATURE,
4639 MGMT_STATUS_REJECTED);
4641 /* Parameters are limited to a single octet */
4642 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4643 return mgmt_cmd_status(sk, hdev->id,
4644 MGMT_OP_SET_EXP_FEATURE,
4645 MGMT_STATUS_INVALID_PARAMS);
4647 /* Only boolean on/off is supported */
4648 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4649 return mgmt_cmd_status(sk, hdev->id,
4650 MGMT_OP_SET_EXP_FEATURE,
4651 MGMT_STATUS_INVALID_PARAMS);
4653 val = !!cp->param[0];
4655 if (val) {
4656 changed = !hci_dev_test_and_set_flag(hdev,
4657 HCI_ENABLE_LL_PRIVACY);
4658 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4660 /* Enable LL privacy + supported settings changed */
4661 flags = BIT(0) | BIT(1);
4662 } else {
4663 changed = hci_dev_test_and_clear_flag(hdev,
4664 HCI_ENABLE_LL_PRIVACY);
4666 /* Disable LL privacy + supported settings changed */
4667 flags = BIT(1);
4670 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4671 rp.flags = cpu_to_le32(flags);
4673 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4675 err = mgmt_cmd_complete(sk, hdev->id,
4676 MGMT_OP_SET_EXP_FEATURE, 0,
4677 &rp, sizeof(rp));
4679 if (changed)
4680 exp_ll_privacy_feature_changed(val, hdev, sk);
4682 return err;
4685 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4686 struct mgmt_cp_set_exp_feature *cp,
4687 u16 data_len)
4689 struct mgmt_rp_set_exp_feature rp;
4690 bool val, changed;
4691 int err;
4693 /* Command requires a valid controller index */
4694 if (!hdev)
4695 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4696 MGMT_OP_SET_EXP_FEATURE,
4697 MGMT_STATUS_INVALID_INDEX);
4699 /* Parameters are limited to a single octet */
4700 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4701 return mgmt_cmd_status(sk, hdev->id,
4702 MGMT_OP_SET_EXP_FEATURE,
4703 MGMT_STATUS_INVALID_PARAMS);
4705 /* Only boolean on/off is supported */
4706 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4707 return mgmt_cmd_status(sk, hdev->id,
4708 MGMT_OP_SET_EXP_FEATURE,
4709 MGMT_STATUS_INVALID_PARAMS);
4711 hci_req_sync_lock(hdev);
4713 val = !!cp->param[0];
4714 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4716 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4717 err = mgmt_cmd_status(sk, hdev->id,
4718 MGMT_OP_SET_EXP_FEATURE,
4719 MGMT_STATUS_NOT_SUPPORTED);
4720 goto unlock_quality_report;
4723 if (changed) {
4724 if (hdev->set_quality_report)
4725 err = hdev->set_quality_report(hdev, val);
4726 else
4727 err = aosp_set_quality_report(hdev, val);
4729 if (err) {
4730 err = mgmt_cmd_status(sk, hdev->id,
4731 MGMT_OP_SET_EXP_FEATURE,
4732 MGMT_STATUS_FAILED);
4733 goto unlock_quality_report;
4736 if (val)
4737 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4738 else
4739 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4742 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4744 memcpy(rp.uuid, quality_report_uuid, 16);
4745 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4746 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4748 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4749 &rp, sizeof(rp));
4751 if (changed)
4752 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4754 unlock_quality_report:
4755 hci_req_sync_unlock(hdev);
4756 return err;
4759 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4760 struct mgmt_cp_set_exp_feature *cp,
4761 u16 data_len)
4763 bool val, changed;
4764 int err;
4765 struct mgmt_rp_set_exp_feature rp;
4767 /* Command requires a valid controller index */
4768 if (!hdev)
4769 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4770 MGMT_OP_SET_EXP_FEATURE,
4771 MGMT_STATUS_INVALID_INDEX);
4773 /* Parameters are limited to a single octet */
4774 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4775 return mgmt_cmd_status(sk, hdev->id,
4776 MGMT_OP_SET_EXP_FEATURE,
4777 MGMT_STATUS_INVALID_PARAMS);
4779 /* Only boolean on/off is supported */
4780 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4781 return mgmt_cmd_status(sk, hdev->id,
4782 MGMT_OP_SET_EXP_FEATURE,
4783 MGMT_STATUS_INVALID_PARAMS);
4785 val = !!cp->param[0];
4786 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4788 if (!hdev->get_data_path_id) {
4789 return mgmt_cmd_status(sk, hdev->id,
4790 MGMT_OP_SET_EXP_FEATURE,
4791 MGMT_STATUS_NOT_SUPPORTED);
4794 if (changed) {
4795 if (val)
4796 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4797 else
4798 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4801 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4802 val, changed);
4804 memcpy(rp.uuid, offload_codecs_uuid, 16);
4805 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4806 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4807 err = mgmt_cmd_complete(sk, hdev->id,
4808 MGMT_OP_SET_EXP_FEATURE, 0,
4809 &rp, sizeof(rp));
4811 if (changed)
4812 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4814 return err;
4817 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4818 struct mgmt_cp_set_exp_feature *cp,
4819 u16 data_len)
4821 bool val, changed;
4822 int err;
4823 struct mgmt_rp_set_exp_feature rp;
4825 /* Command requires a valid controller index */
4826 if (!hdev)
4827 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4828 MGMT_OP_SET_EXP_FEATURE,
4829 MGMT_STATUS_INVALID_INDEX);
4831 /* Parameters are limited to a single octet */
4832 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4833 return mgmt_cmd_status(sk, hdev->id,
4834 MGMT_OP_SET_EXP_FEATURE,
4835 MGMT_STATUS_INVALID_PARAMS);
4837 /* Only boolean on/off is supported */
4838 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4839 return mgmt_cmd_status(sk, hdev->id,
4840 MGMT_OP_SET_EXP_FEATURE,
4841 MGMT_STATUS_INVALID_PARAMS);
4843 val = !!cp->param[0];
4844 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4846 if (!hci_dev_le_state_simultaneous(hdev)) {
4847 return mgmt_cmd_status(sk, hdev->id,
4848 MGMT_OP_SET_EXP_FEATURE,
4849 MGMT_STATUS_NOT_SUPPORTED);
4852 if (changed) {
4853 if (val)
4854 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4855 else
4856 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4859 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4860 val, changed);
4862 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4863 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4864 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4865 err = mgmt_cmd_complete(sk, hdev->id,
4866 MGMT_OP_SET_EXP_FEATURE, 0,
4867 &rp, sizeof(rp));
4869 if (changed)
4870 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4872 return err;
4875 #ifdef CONFIG_BT_LE
4876 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4877 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4879 struct mgmt_rp_set_exp_feature rp;
4880 bool val, changed = false;
4881 int err;
4883 /* Command requires the non-controller index */
4884 if (hdev)
4885 return mgmt_cmd_status(sk, hdev->id,
4886 MGMT_OP_SET_EXP_FEATURE,
4887 MGMT_STATUS_INVALID_INDEX);
4889 /* Parameters are limited to a single octet */
4890 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4891 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4892 MGMT_OP_SET_EXP_FEATURE,
4893 MGMT_STATUS_INVALID_PARAMS);
4895 /* Only boolean on/off is supported */
4896 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4897 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4898 MGMT_OP_SET_EXP_FEATURE,
4899 MGMT_STATUS_INVALID_PARAMS);
4901 val = !!cp->param[0];
4902 if (val)
4903 err = iso_init();
4904 else
4905 err = iso_exit();
4907 if (!err)
4908 changed = true;
4910 memcpy(rp.uuid, iso_socket_uuid, 16);
4911 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4913 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4915 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4916 MGMT_OP_SET_EXP_FEATURE, 0,
4917 &rp, sizeof(rp));
4919 if (changed)
4920 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4922 return err;
4924 #endif
4926 static const struct mgmt_exp_feature {
4927 const u8 *uuid;
4928 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4929 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4930 } exp_features[] = {
4931 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4932 #ifdef CONFIG_BT_FEATURE_DEBUG
4933 EXP_FEAT(debug_uuid, set_debug_func),
4934 #endif
4935 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4936 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4937 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4938 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4939 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4940 #ifdef CONFIG_BT_LE
4941 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4942 #endif
4944 /* end with a null feature */
4945 EXP_FEAT(NULL, NULL)
4946 };
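/* Handler for MGMT_OP_SET_EXP_FEATURE. Looks up the UUID from the command
 * parameters in exp_features[] and dispatches to the matching set_func;
 * unknown UUIDs are rejected with MGMT_STATUS_NOT_SUPPORTED.
 */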
4948 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4949 void *data, u16 data_len)
4951 struct mgmt_cp_set_exp_feature *cp = data;
4952 size_t i = 0;
4954 bt_dev_dbg(hdev, "sock %p", sk);
4956 for (i = 0; exp_features[i].uuid; i++) {
4957 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4958 return exp_features[i].set_func(sk, hdev, cp, data_len);
4961 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4962 MGMT_OP_SET_EXP_FEATURE,
4963 MGMT_STATUS_NOT_SUPPORTED);
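/* Compute the flags that can actually be supported for a given LE
 * connection parameter entry. Devices identified by an IRK (i.e. using
 * RPAs) lose HCI_CONN_FLAG_REMOTE_WAKEUP when LL privacy is not in use,
 * since they cannot be programmed into the accept list in that case.
 */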
4966 static u32 get_params_flags(struct hci_dev *hdev,
4967 struct hci_conn_params *params)
4969 u32 flags = hdev->conn_flags;
4971 /* Devices using RPAs can only be programmed into the acceptlist if
4972 * LL Privacy has been enabled, otherwise they cannot mark
4973 * HCI_CONN_FLAG_REMOTE_WAKEUP.
4974 */
4975 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4976 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4977 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
4979 return flags;
4982 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4983 u16 data_len)
4985 struct mgmt_cp_get_device_flags *cp = data;
4986 struct mgmt_rp_get_device_flags rp;
4987 struct bdaddr_list_with_flags *br_params;
4988 struct hci_conn_params *params;
4989 u32 supported_flags;
4990 u32 current_flags = 0;
4991 u8 status = MGMT_STATUS_INVALID_PARAMS;
4993 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
4994 &cp->addr.bdaddr, cp->addr.type);
4996 hci_dev_lock(hdev);
4998 supported_flags = hdev->conn_flags;
5000 memset(&rp, 0, sizeof(rp));
5002 if (cp->addr.type == BDADDR_BREDR) {
5003 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5004 &cp->addr.bdaddr,
5005 cp->addr.type);
5006 if (!br_params)
5007 goto done;
5009 current_flags = br_params->flags;
5010 } else {
5011 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5012 le_addr_type(cp->addr.type));
5013 if (!params)
5014 goto done;
5016 supported_flags = get_params_flags(hdev, params);
5017 current_flags = params->flags;
5020 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5021 rp.addr.type = cp->addr.type;
5022 rp.supported_flags = cpu_to_le32(supported_flags);
5023 rp.current_flags = cpu_to_le32(current_flags);
5025 status = MGMT_STATUS_SUCCESS;
5027 done:
5028 hci_dev_unlock(hdev);
5030 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5031 &rp, sizeof(rp));
5034 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5035 bdaddr_t *bdaddr, u8 bdaddr_type,
5036 u32 supported_flags, u32 current_flags)
5038 struct mgmt_ev_device_flags_changed ev;
5040 bacpy(&ev.addr.bdaddr, bdaddr);
5041 ev.addr.type = bdaddr_type;
5042 ev.supported_flags = cpu_to_le32(supported_flags);
5043 ev.current_flags = cpu_to_le32(current_flags);
5045 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
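/* Handler for MGMT_OP_SET_DEVICE_FLAGS. Validates the requested flags
 * against the supported set, stores them in the BR/EDR accept list entry
 * or the LE connection parameters, and emits Device Flags Changed to
 * other sockets on success.
 */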
5048 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5049 u16 len)
5051 struct mgmt_cp_set_device_flags *cp = data;
5052 struct bdaddr_list_with_flags *br_params;
5053 struct hci_conn_params *params;
5054 u8 status = MGMT_STATUS_INVALID_PARAMS;
5055 u32 supported_flags;
5056 u32 current_flags = __le32_to_cpu(cp->current_flags);
5058 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5059 &cp->addr.bdaddr, cp->addr.type, current_flags);
5061 // We should take hci_dev_lock() early; conn_flags can change
5062 supported_flags = hdev->conn_flags;
5064 if ((supported_flags | current_flags) != supported_flags) {
5065 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5066 current_flags, supported_flags);
5067 goto done;
5070 hci_dev_lock(hdev);
5072 if (cp->addr.type == BDADDR_BREDR) {
5073 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5074 &cp->addr.bdaddr,
5075 cp->addr.type);
5077 if (br_params) {
5078 br_params->flags = current_flags;
5079 status = MGMT_STATUS_SUCCESS;
5080 } else {
5081 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5082 &cp->addr.bdaddr, cp->addr.type);
5085 goto unlock;
5088 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5089 le_addr_type(cp->addr.type));
5090 if (!params) {
5091 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5092 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5093 goto unlock;
5096 supported_flags = get_params_flags(hdev, params);
5098 if ((supported_flags | current_flags) != supported_flags) {
5099 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5100 current_flags, supported_flags);
5101 goto unlock;
5104 WRITE_ONCE(params->flags, current_flags);
5105 status = MGMT_STATUS_SUCCESS;
5107 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5108 * has been set.
5109 */
5110 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5111 hci_update_passive_scan(hdev);
5113 unlock:
5114 hci_dev_unlock(hdev);
5116 done:
5117 if (status == MGMT_STATUS_SUCCESS)
5118 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5119 supported_flags, current_flags);
5121 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5122 &cp->addr, sizeof(cp->addr));
5125 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5126 u16 handle)
5128 struct mgmt_ev_adv_monitor_added ev;
5130 ev.monitor_handle = cpu_to_le16(handle);
5132 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5135 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5137 struct mgmt_ev_adv_monitor_removed ev;
5138 struct mgmt_pending_cmd *cmd;
5139 struct sock *sk_skip = NULL;
5140 struct mgmt_cp_remove_adv_monitor *cp;
5142 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5143 if (cmd) {
5144 cp = cmd->param;
5146 if (cp->monitor_handle)
5147 sk_skip = cmd->sk;
5150 ev.monitor_handle = cpu_to_le16(handle);
5152 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
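/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES. Returns the supported
 * and enabled monitor features, the handle and pattern limits, and the
 * handles of all currently registered advertisement monitors.
 */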
5155 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5156 void *data, u16 len)
5158 struct adv_monitor *monitor = NULL;
5159 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5160 int handle, err;
5161 size_t rp_size = 0;
5162 __u32 supported = 0;
5163 __u32 enabled = 0;
5164 __u16 num_handles = 0;
5165 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5167 BT_DBG("request for %s", hdev->name);
5169 hci_dev_lock(hdev);
5171 if (msft_monitor_supported(hdev))
5172 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5174 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5175 handles[num_handles++] = monitor->handle;
5177 hci_dev_unlock(hdev);
5179 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5180 rp = kmalloc(rp_size, GFP_KERNEL);
5181 if (!rp)
5182 return -ENOMEM;
5184 /* All supported features are currently enabled */
5185 enabled = supported;
5187 rp->supported_features = cpu_to_le32(supported);
5188 rp->enabled_features = cpu_to_le32(enabled);
5189 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5190 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5191 rp->num_handles = cpu_to_le16(num_handles);
5192 if (num_handles)
5193 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5195 err = mgmt_cmd_complete(sk, hdev->id,
5196 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5197 MGMT_STATUS_SUCCESS, rp, rp_size);
5199 kfree(rp);
5201 return err;
5204 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5205 void *data, int status)
5207 struct mgmt_rp_add_adv_patterns_monitor rp;
5208 struct mgmt_pending_cmd *cmd = data;
5209 struct adv_monitor *monitor = cmd->user_data;
5211 hci_dev_lock(hdev);
5213 rp.monitor_handle = cpu_to_le16(monitor->handle);
5215 if (!status) {
5216 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5217 hdev->adv_monitors_cnt++;
5218 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5219 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5220 hci_update_passive_scan(hdev);
5223 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5224 mgmt_status(status), &rp, sizeof(rp));
5225 mgmt_pending_remove(cmd);
5227 hci_dev_unlock(hdev);
5228 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5229 rp.monitor_handle, status);
5232 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5234 struct mgmt_pending_cmd *cmd = data;
5235 struct adv_monitor *monitor = cmd->user_data;
5237 return hci_add_adv_monitor(hdev, monitor);
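/* Common tail of the Add Adv Patterns Monitor commands. Rejects the
 * request while a conflicting monitor or SET_LE operation is pending,
 * otherwise queues monitor registration via hci_cmd_sync_queue(). The
 * monitor is freed on any failure path before a status is returned.
 */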
5240 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5241 struct adv_monitor *m, u8 status,
5242 void *data, u16 len, u16 op)
5244 struct mgmt_pending_cmd *cmd;
5245 int err;
5247 hci_dev_lock(hdev);
5249 if (status)
5250 goto unlock;
5252 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5253 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5254 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5255 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5256 status = MGMT_STATUS_BUSY;
5257 goto unlock;
5260 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5261 if (!cmd) {
5262 status = MGMT_STATUS_NO_RESOURCES;
5263 goto unlock;
5266 cmd->user_data = m;
5267 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5268 mgmt_add_adv_patterns_monitor_complete);
5269 if (err) {
5270 if (err == -ENOMEM)
5271 status = MGMT_STATUS_NO_RESOURCES;
5272 else
5273 status = MGMT_STATUS_FAILED;
5275 goto unlock;
5278 hci_dev_unlock(hdev);
5280 return 0;
5282 unlock:
5283 hci_free_adv_monitor(hdev, m);
5284 hci_dev_unlock(hdev);
5285 return mgmt_cmd_status(sk, hdev->id, op, status);
5288 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5289 struct mgmt_adv_rssi_thresholds *rssi)
5291 if (rssi) {
5292 m->rssi.low_threshold = rssi->low_threshold;
5293 m->rssi.low_threshold_timeout =
5294 __le16_to_cpu(rssi->low_threshold_timeout);
5295 m->rssi.high_threshold = rssi->high_threshold;
5296 m->rssi.high_threshold_timeout =
5297 __le16_to_cpu(rssi->high_threshold_timeout);
5298 m->rssi.sampling_period = rssi->sampling_period;
5299 } else {
5300 /* Default values. These numbers are the least constricting
5301 * parameters for the MSFT API to work, so it behaves as if there
5302 * are no RSSI parameters to consider. May need to be changed
5303 * if other APIs are to be supported.
5304 */
5305 m->rssi.low_threshold = -127;
5306 m->rssi.low_threshold_timeout = 60;
5307 m->rssi.high_threshold = -127;
5308 m->rssi.high_threshold_timeout = 0;
5309 m->rssi.sampling_period = 0;
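/* Validate and copy the patterns of an Add Adv Patterns Monitor command
 * into the monitor. Any pattern whose offset or length falls outside the
 * extended advertising data size is rejected as invalid parameters.
 */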
5313 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5314 struct mgmt_adv_pattern *patterns)
5316 u8 offset = 0, length = 0;
5317 struct adv_pattern *p = NULL;
5318 int i;
5320 for (i = 0; i < pattern_count; i++) {
5321 offset = patterns[i].offset;
5322 length = patterns[i].length;
5323 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5324 length > HCI_MAX_EXT_AD_LENGTH ||
5325 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5326 return MGMT_STATUS_INVALID_PARAMS;
5328 p = kmalloc(sizeof(*p), GFP_KERNEL);
5329 if (!p)
5330 return MGMT_STATUS_NO_RESOURCES;
5332 p->ad_type = patterns[i].ad_type;
5333 p->offset = patterns[i].offset;
5334 p->length = patterns[i].length;
5335 memcpy(p->value, patterns[i].value, p->length);
5337 INIT_LIST_HEAD(&p->list);
5338 list_add(&p->list, &m->patterns);
5341 return MGMT_STATUS_SUCCESS;
5344 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5345 void *data, u16 len)
5347 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5348 struct adv_monitor *m = NULL;
5349 u8 status = MGMT_STATUS_SUCCESS;
5350 size_t expected_size = sizeof(*cp);
5352 BT_DBG("request for %s", hdev->name);
5354 if (len <= sizeof(*cp)) {
5355 status = MGMT_STATUS_INVALID_PARAMS;
5356 goto done;
5359 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5360 if (len != expected_size) {
5361 status = MGMT_STATUS_INVALID_PARAMS;
5362 goto done;
5365 m = kzalloc(sizeof(*m), GFP_KERNEL);
5366 if (!m) {
5367 status = MGMT_STATUS_NO_RESOURCES;
5368 goto done;
5371 INIT_LIST_HEAD(&m->patterns);
5373 parse_adv_monitor_rssi(m, NULL);
5374 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5376 done:
5377 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5378 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5381 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5382 void *data, u16 len)
5384 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5385 struct adv_monitor *m = NULL;
5386 u8 status = MGMT_STATUS_SUCCESS;
5387 size_t expected_size = sizeof(*cp);
5389 BT_DBG("request for %s", hdev->name);
5391 if (len <= sizeof(*cp)) {
5392 status = MGMT_STATUS_INVALID_PARAMS;
5393 goto done;
5396 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5397 if (len != expected_size) {
5398 status = MGMT_STATUS_INVALID_PARAMS;
5399 goto done;
5402 m = kzalloc(sizeof(*m), GFP_KERNEL);
5403 if (!m) {
5404 status = MGMT_STATUS_NO_RESOURCES;
5405 goto done;
5408 INIT_LIST_HEAD(&m->patterns);
5410 parse_adv_monitor_rssi(m, &cp->rssi);
5411 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5413 done:
5414 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5415 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5418 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5419 void *data, int status)
5421 struct mgmt_rp_remove_adv_monitor rp;
5422 struct mgmt_pending_cmd *cmd = data;
5423 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5425 hci_dev_lock(hdev);
5427 rp.monitor_handle = cp->monitor_handle;
5429 if (!status)
5430 hci_update_passive_scan(hdev);
5432 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5433 mgmt_status(status), &rp, sizeof(rp));
5434 mgmt_pending_remove(cmd);
5436 hci_dev_unlock(hdev);
5437 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5438 rp.monitor_handle, status);
5441 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5443 struct mgmt_pending_cmd *cmd = data;
5444 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5445 u16 handle = __le16_to_cpu(cp->monitor_handle);
5447 if (!handle)
5448 return hci_remove_all_adv_monitor(hdev);
5450 return hci_remove_single_adv_monitor(hdev, handle);
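/* Handler for MGMT_OP_REMOVE_ADV_MONITOR. A monitor_handle of zero
 * removes all registered monitors; any other value removes only the
 * matching monitor.
 */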
5453 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5454 void *data, u16 len)
5456 struct mgmt_pending_cmd *cmd;
5457 int err, status;
5459 hci_dev_lock(hdev);
5461 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5462 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5463 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5464 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5465 status = MGMT_STATUS_BUSY;
5466 goto unlock;
5469 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5470 if (!cmd) {
5471 status = MGMT_STATUS_NO_RESOURCES;
5472 goto unlock;
5475 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5476 mgmt_remove_adv_monitor_complete);
5478 if (err) {
5479 mgmt_pending_remove(cmd);
5481 if (err == -ENOMEM)
5482 status = MGMT_STATUS_NO_RESOURCES;
5483 else
5484 status = MGMT_STATUS_FAILED;
5486 goto unlock;
5489 hci_dev_unlock(hdev);
5491 return 0;
5493 unlock:
5494 hci_dev_unlock(hdev);
5495 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5496 status);
5499 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5501 struct mgmt_rp_read_local_oob_data mgmt_rp;
5502 size_t rp_size = sizeof(mgmt_rp);
5503 struct mgmt_pending_cmd *cmd = data;
5504 struct sk_buff *skb = cmd->skb;
5505 u8 status = mgmt_status(err);
5507 if (!status) {
5508 if (!skb)
5509 status = MGMT_STATUS_FAILED;
5510 else if (IS_ERR(skb))
5511 status = mgmt_status(PTR_ERR(skb));
5512 else
5513 status = mgmt_status(skb->data[0]);
5516 bt_dev_dbg(hdev, "status %d", status);
5518 if (status) {
5519 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5520 goto remove;
5523 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5525 if (!bredr_sc_enabled(hdev)) {
5526 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5528 if (skb->len < sizeof(*rp)) {
5529 mgmt_cmd_status(cmd->sk, hdev->id,
5530 MGMT_OP_READ_LOCAL_OOB_DATA,
5531 MGMT_STATUS_FAILED);
5532 goto remove;
5535 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5536 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5538 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5539 } else {
5540 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5542 if (skb->len < sizeof(*rp)) {
5543 mgmt_cmd_status(cmd->sk, hdev->id,
5544 MGMT_OP_READ_LOCAL_OOB_DATA,
5545 MGMT_STATUS_FAILED);
5546 goto remove;
5549 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5550 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5552 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5553 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5556 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5557 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5559 remove:
5560 if (skb && !IS_ERR(skb))
5561 kfree_skb(skb);
5563 mgmt_pending_free(cmd);
5566 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5568 struct mgmt_pending_cmd *cmd = data;
5570 if (bredr_sc_enabled(hdev))
5571 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5572 else
5573 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5575 if (IS_ERR(cmd->skb))
5576 return PTR_ERR(cmd->skb);
5577 else
5578 return 0;
5581 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5582 void *data, u16 data_len)
5584 struct mgmt_pending_cmd *cmd;
5585 int err;
5587 bt_dev_dbg(hdev, "sock %p", sk);
5589 hci_dev_lock(hdev);
5591 if (!hdev_is_powered(hdev)) {
5592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5593 MGMT_STATUS_NOT_POWERED);
5594 goto unlock;
5597 if (!lmp_ssp_capable(hdev)) {
5598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5599 MGMT_STATUS_NOT_SUPPORTED);
5600 goto unlock;
5603 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5604 if (!cmd)
5605 err = -ENOMEM;
5606 else
5607 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5608 read_local_oob_data_complete);
5610 if (err < 0) {
5611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5612 MGMT_STATUS_FAILED);
5614 if (cmd)
5615 mgmt_pending_free(cmd);
5618 unlock:
5619 hci_dev_unlock(hdev);
5620 return err;
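/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Accepts the legacy P-192-only
 * layout or the extended layout carrying both P-192 and P-256 values; a
 * zero-valued hash/randomizer pair disables OOB data for that curve.
 */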
5623 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5624 void *data, u16 len)
5626 struct mgmt_addr_info *addr = data;
5627 int err;
5629 bt_dev_dbg(hdev, "sock %p", sk);
5631 if (!bdaddr_type_is_valid(addr->type))
5632 return mgmt_cmd_complete(sk, hdev->id,
5633 MGMT_OP_ADD_REMOTE_OOB_DATA,
5634 MGMT_STATUS_INVALID_PARAMS,
5635 addr, sizeof(*addr));
5637 hci_dev_lock(hdev);
5639 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5640 struct mgmt_cp_add_remote_oob_data *cp = data;
5641 u8 status;
5643 if (cp->addr.type != BDADDR_BREDR) {
5644 err = mgmt_cmd_complete(sk, hdev->id,
5645 MGMT_OP_ADD_REMOTE_OOB_DATA,
5646 MGMT_STATUS_INVALID_PARAMS,
5647 &cp->addr, sizeof(cp->addr));
5648 goto unlock;
5651 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5652 cp->addr.type, cp->hash,
5653 cp->rand, NULL, NULL);
5654 if (err < 0)
5655 status = MGMT_STATUS_FAILED;
5656 else
5657 status = MGMT_STATUS_SUCCESS;
5659 err = mgmt_cmd_complete(sk, hdev->id,
5660 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5661 &cp->addr, sizeof(cp->addr));
5662 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5663 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5664 u8 *rand192, *hash192, *rand256, *hash256;
5665 u8 status;
5667 if (bdaddr_type_is_le(cp->addr.type)) {
5668 /* Enforce zero-valued 192-bit parameters as
5669 * long as legacy SMP OOB isn't implemented.
5670 */
5671 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5672 memcmp(cp->hash192, ZERO_KEY, 16)) {
5673 err = mgmt_cmd_complete(sk, hdev->id,
5674 MGMT_OP_ADD_REMOTE_OOB_DATA,
5675 MGMT_STATUS_INVALID_PARAMS,
5676 addr, sizeof(*addr));
5677 goto unlock;
5680 rand192 = NULL;
5681 hash192 = NULL;
5682 } else {
5683 /* In case one of the P-192 values is set to zero,
5684 * then just disable OOB data for P-192.
5685 */
5686 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5687 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5688 rand192 = NULL;
5689 hash192 = NULL;
5690 } else {
5691 rand192 = cp->rand192;
5692 hash192 = cp->hash192;
5696 /* In case one of the P-256 values is set to zero, then just
5697 * disable OOB data for P-256.
5698 */
5699 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5700 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5701 rand256 = NULL;
5702 hash256 = NULL;
5703 } else {
5704 rand256 = cp->rand256;
5705 hash256 = cp->hash256;
5708 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5709 cp->addr.type, hash192, rand192,
5710 hash256, rand256);
5711 if (err < 0)
5712 status = MGMT_STATUS_FAILED;
5713 else
5714 status = MGMT_STATUS_SUCCESS;
5716 err = mgmt_cmd_complete(sk, hdev->id,
5717 MGMT_OP_ADD_REMOTE_OOB_DATA,
5718 status, &cp->addr, sizeof(cp->addr));
5719 } else {
5720 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5721 len);
5722 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5723 MGMT_STATUS_INVALID_PARAMS);
5726 unlock:
5727 hci_dev_unlock(hdev);
5728 return err;
5731 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5732 void *data, u16 len)
5734 struct mgmt_cp_remove_remote_oob_data *cp = data;
5735 u8 status;
5736 int err;
5738 bt_dev_dbg(hdev, "sock %p", sk);
5740 if (cp->addr.type != BDADDR_BREDR)
5741 return mgmt_cmd_complete(sk, hdev->id,
5742 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5743 MGMT_STATUS_INVALID_PARAMS,
5744 &cp->addr, sizeof(cp->addr));
5746 hci_dev_lock(hdev);
5748 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5749 hci_remote_oob_data_clear(hdev);
5750 status = MGMT_STATUS_SUCCESS;
5751 goto done;
5754 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5755 if (err < 0)
5756 status = MGMT_STATUS_INVALID_PARAMS;
5757 else
5758 status = MGMT_STATUS_SUCCESS;
5760 done:
5761 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5762 status, &cp->addr, sizeof(cp->addr));
5764 hci_dev_unlock(hdev);
5765 return err;
5768 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5770 struct mgmt_pending_cmd *cmd;
5772 bt_dev_dbg(hdev, "status %u", status);
5774 hci_dev_lock(hdev);
5776 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5777 if (!cmd)
5778 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5780 if (!cmd)
5781 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5783 if (cmd) {
5784 cmd->cmd_complete(cmd, mgmt_status(status));
5785 mgmt_pending_remove(cmd);
5788 hci_dev_unlock(hdev);
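/* Check whether the requested discovery type can be served by this
 * controller, storing the mgmt status to report when it cannot.
 */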
5791 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5792 uint8_t *mgmt_status)
5794 switch (type) {
5795 case DISCOV_TYPE_LE:
5796 *mgmt_status = mgmt_le_support(hdev);
5797 if (*mgmt_status)
5798 return false;
5799 break;
5800 case DISCOV_TYPE_INTERLEAVED:
5801 *mgmt_status = mgmt_le_support(hdev);
5802 if (*mgmt_status)
5803 return false;
5804 fallthrough;
5805 case DISCOV_TYPE_BREDR:
5806 *mgmt_status = mgmt_bredr_support(hdev);
5807 if (*mgmt_status)
5808 return false;
5809 break;
5810 default:
5811 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5812 return false;
5815 return true;
5818 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5820 struct mgmt_pending_cmd *cmd = data;
5822 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5823 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5824 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5825 return;
5827 bt_dev_dbg(hdev, "err %d", err);
5829 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5830 cmd->param, 1);
5831 mgmt_pending_remove(cmd);
5833 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5834 DISCOVERY_FINDING);
5837 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5839 return hci_start_discovery_sync(hdev);
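/* Common implementation of Start Discovery and Start Limited Discovery:
 * checks power, discovery state and pause status, validates the type,
 * clears any previous discovery filter and queues the discovery start.
 */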
5842 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5843 u16 op, void *data, u16 len)
5845 struct mgmt_cp_start_discovery *cp = data;
5846 struct mgmt_pending_cmd *cmd;
5847 u8 status;
5848 int err;
5850 bt_dev_dbg(hdev, "sock %p", sk);
5852 hci_dev_lock(hdev);
5854 if (!hdev_is_powered(hdev)) {
5855 err = mgmt_cmd_complete(sk, hdev->id, op,
5856 MGMT_STATUS_NOT_POWERED,
5857 &cp->type, sizeof(cp->type));
5858 goto failed;
5861 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5862 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5863 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5864 &cp->type, sizeof(cp->type));
5865 goto failed;
5868 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5869 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5870 &cp->type, sizeof(cp->type));
5871 goto failed;
5874 /* Can't start discovery when it is paused */
5875 if (hdev->discovery_paused) {
5876 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5877 &cp->type, sizeof(cp->type));
5878 goto failed;
5881 /* Clear the discovery filter first to free any previously
5882 * allocated memory for the UUID list.
5883 */
5884 hci_discovery_filter_clear(hdev);
5886 hdev->discovery.type = cp->type;
5887 hdev->discovery.report_invalid_rssi = false;
5888 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5889 hdev->discovery.limited = true;
5890 else
5891 hdev->discovery.limited = false;
5893 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5894 if (!cmd) {
5895 err = -ENOMEM;
5896 goto failed;
5899 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5900 start_discovery_complete);
5901 if (err < 0) {
5902 mgmt_pending_remove(cmd);
5903 goto failed;
5906 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5908 failed:
5909 hci_dev_unlock(hdev);
5910 return err;
5913 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5914 void *data, u16 len)
5916 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5917 data, len);
5920 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5921 void *data, u16 len)
5923 return start_discovery_internal(sk, hdev,
5924 MGMT_OP_START_LIMITED_DISCOVERY,
5925 data, len);
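/* Handler for MGMT_OP_START_SERVICE_DISCOVERY. Like Start Discovery, but
 * additionally installs a result filter built from the supplied RSSI
 * threshold and UUID list before queueing the discovery start.
 */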
5928 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5929 void *data, u16 len)
5931 struct mgmt_cp_start_service_discovery *cp = data;
5932 struct mgmt_pending_cmd *cmd;
5933 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5934 u16 uuid_count, expected_len;
5935 u8 status;
5936 int err;
5938 bt_dev_dbg(hdev, "sock %p", sk);
5940 hci_dev_lock(hdev);
5942 if (!hdev_is_powered(hdev)) {
5943 err = mgmt_cmd_complete(sk, hdev->id,
5944 MGMT_OP_START_SERVICE_DISCOVERY,
5945 MGMT_STATUS_NOT_POWERED,
5946 &cp->type, sizeof(cp->type));
5947 goto failed;
5950 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5951 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5952 err = mgmt_cmd_complete(sk, hdev->id,
5953 MGMT_OP_START_SERVICE_DISCOVERY,
5954 MGMT_STATUS_BUSY, &cp->type,
5955 sizeof(cp->type));
5956 goto failed;
5959 if (hdev->discovery_paused) {
5960 err = mgmt_cmd_complete(sk, hdev->id,
5961 MGMT_OP_START_SERVICE_DISCOVERY,
5962 MGMT_STATUS_BUSY, &cp->type,
5963 sizeof(cp->type));
5964 goto failed;
5967 uuid_count = __le16_to_cpu(cp->uuid_count);
5968 if (uuid_count > max_uuid_count) {
5969 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5970 uuid_count);
5971 err = mgmt_cmd_complete(sk, hdev->id,
5972 MGMT_OP_START_SERVICE_DISCOVERY,
5973 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5974 sizeof(cp->type));
5975 goto failed;
5978 expected_len = sizeof(*cp) + uuid_count * 16;
5979 if (expected_len != len) {
5980 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5981 expected_len, len);
5982 err = mgmt_cmd_complete(sk, hdev->id,
5983 MGMT_OP_START_SERVICE_DISCOVERY,
5984 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5985 sizeof(cp->type));
5986 goto failed;
5989 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5990 err = mgmt_cmd_complete(sk, hdev->id,
5991 MGMT_OP_START_SERVICE_DISCOVERY,
5992 status, &cp->type, sizeof(cp->type));
5993 goto failed;
5996 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5997 hdev, data, len);
5998 if (!cmd) {
5999 err = -ENOMEM;
6000 goto failed;
6003 /* Clear the discovery filter first to free any previously
6004 * allocated memory for the UUID list.
6005 */
6006 hci_discovery_filter_clear(hdev);
6008 hdev->discovery.result_filtering = true;
6009 hdev->discovery.type = cp->type;
6010 hdev->discovery.rssi = cp->rssi;
6011 hdev->discovery.uuid_count = uuid_count;
6013 if (uuid_count > 0) {
6014 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6015 GFP_KERNEL);
6016 if (!hdev->discovery.uuids) {
6017 err = mgmt_cmd_complete(sk, hdev->id,
6018 MGMT_OP_START_SERVICE_DISCOVERY,
6019 MGMT_STATUS_FAILED,
6020 &cp->type, sizeof(cp->type));
6021 mgmt_pending_remove(cmd);
6022 goto failed;
6026 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6027 start_discovery_complete);
6028 if (err < 0) {
6029 mgmt_pending_remove(cmd);
6030 goto failed;
6033 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6035 failed:
6036 hci_dev_unlock(hdev);
6037 return err;
6040 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6042 struct mgmt_pending_cmd *cmd;
6044 bt_dev_dbg(hdev, "status %u", status);
6046 hci_dev_lock(hdev);
6048 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6049 if (cmd) {
6050 cmd->cmd_complete(cmd, mgmt_status(status));
6051 mgmt_pending_remove(cmd);
6054 hci_dev_unlock(hdev);
6057 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6059 struct mgmt_pending_cmd *cmd = data;
6061 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6062 return;
6064 bt_dev_dbg(hdev, "err %d", err);
6066 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6067 cmd->param, 1);
6068 mgmt_pending_remove(cmd);
6070 if (!err)
6071 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6074 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6076 return hci_stop_discovery_sync(hdev);
6079 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6080 u16 len)
6082 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6083 struct mgmt_pending_cmd *cmd;
6084 int err;
6086 bt_dev_dbg(hdev, "sock %p", sk);
6088 hci_dev_lock(hdev);
6090 if (!hci_discovery_active(hdev)) {
6091 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6092 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6093 sizeof(mgmt_cp->type));
6094 goto unlock;
6097 if (hdev->discovery.type != mgmt_cp->type) {
6098 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6099 MGMT_STATUS_INVALID_PARAMS,
6100 &mgmt_cp->type, sizeof(mgmt_cp->type));
6101 goto unlock;
6104 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6105 if (!cmd) {
6106 err = -ENOMEM;
6107 goto unlock;
6110 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6111 stop_discovery_complete);
6112 if (err < 0) {
6113 mgmt_pending_remove(cmd);
6114 goto unlock;
6117 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6119 unlock:
6120 hci_dev_unlock(hdev);
6121 return err;
6124 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6125 u16 len)
6127 struct mgmt_cp_confirm_name *cp = data;
6128 struct inquiry_entry *e;
6129 int err;
6131 bt_dev_dbg(hdev, "sock %p", sk);
6133 hci_dev_lock(hdev);
6135 if (!hci_discovery_active(hdev)) {
6136 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6137 MGMT_STATUS_FAILED, &cp->addr,
6138 sizeof(cp->addr));
6139 goto failed;
6142 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6143 if (!e) {
6144 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6145 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6146 sizeof(cp->addr));
6147 goto failed;
6150 if (cp->name_known) {
6151 e->name_state = NAME_KNOWN;
6152 list_del(&e->list);
6153 } else {
6154 e->name_state = NAME_NEEDED;
6155 hci_inquiry_cache_update_resolve(hdev, e);
6158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6159 &cp->addr, sizeof(cp->addr));
6161 failed:
6162 hci_dev_unlock(hdev);
6163 return err;
6166 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6167 u16 len)
6169 struct mgmt_cp_block_device *cp = data;
6170 u8 status;
6171 int err;
6173 bt_dev_dbg(hdev, "sock %p", sk);
6175 if (!bdaddr_type_is_valid(cp->addr.type))
6176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6177 MGMT_STATUS_INVALID_PARAMS,
6178 &cp->addr, sizeof(cp->addr));
6180 hci_dev_lock(hdev);
6182 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6183 cp->addr.type);
6184 if (err < 0) {
6185 status = MGMT_STATUS_FAILED;
6186 goto done;
6189 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6190 sk);
6191 status = MGMT_STATUS_SUCCESS;
6193 done:
6194 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6195 &cp->addr, sizeof(cp->addr));
6197 hci_dev_unlock(hdev);
6199 return err;
6202 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6203 u16 len)
6205 struct mgmt_cp_unblock_device *cp = data;
6206 u8 status;
6207 int err;
6209 bt_dev_dbg(hdev, "sock %p", sk);
6211 if (!bdaddr_type_is_valid(cp->addr.type))
6212 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6213 MGMT_STATUS_INVALID_PARAMS,
6214 &cp->addr, sizeof(cp->addr));
6216 hci_dev_lock(hdev);
6218 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6219 cp->addr.type);
6220 if (err < 0) {
6221 status = MGMT_STATUS_INVALID_PARAMS;
6222 goto done;
6225 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6226 sk);
6227 status = MGMT_STATUS_SUCCESS;
6229 done:
6230 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6231 &cp->addr, sizeof(cp->addr));
6233 hci_dev_unlock(hdev);
6235 return err;
6238 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6240 return hci_update_eir_sync(hdev);
6243 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6244 u16 len)
6246 struct mgmt_cp_set_device_id *cp = data;
6247 int err;
6248 __u16 source;
6250 bt_dev_dbg(hdev, "sock %p", sk);
6252 source = __le16_to_cpu(cp->source);
6254 if (source > 0x0002)
6255 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6256 MGMT_STATUS_INVALID_PARAMS);
6258 hci_dev_lock(hdev);
6260 hdev->devid_source = source;
6261 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6262 hdev->devid_product = __le16_to_cpu(cp->product);
6263 hdev->devid_version = __le16_to_cpu(cp->version);
6265 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6266 NULL, 0);
6268 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6270 hci_dev_unlock(hdev);
6272 return err;
6275 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6277 if (err)
6278 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6279 else
6280 bt_dev_dbg(hdev, "status %d", err);
6283 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6285 struct cmd_lookup match = { NULL, hdev };
6286 u8 instance;
6287 struct adv_info *adv_instance;
6288 u8 status = mgmt_status(err);
6290 if (status) {
6291 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6292 cmd_status_rsp, &status);
6293 return;
6296 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6297 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6298 else
6299 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6301 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6302 &match);
6304 new_settings(hdev, match.sk);
6306 if (match.sk)
6307 sock_put(match.sk);
6309 /* If "Set Advertising" was just disabled and instance advertising was
6310 * set up earlier, then re-enable multi-instance advertising.
6311 */
6312 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6313 list_empty(&hdev->adv_instances))
6314 return;
6316 instance = hdev->cur_adv_instance;
6317 if (!instance) {
6318 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6319 struct adv_info, list);
6320 if (!adv_instance)
6321 return;
6323 instance = adv_instance->instance;
6326 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6328 enable_advertising_instance(hdev, err);
6331 static int set_adv_sync(struct hci_dev *hdev, void *data)
6333 struct mgmt_pending_cmd *cmd = data;
6334 struct mgmt_mode *cp = cmd->param;
6335 u8 val = !!cp->val;
6337 if (cp->val == 0x02)
6338 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6339 else
6340 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6342 cancel_adv_timeout(hdev);
6344 if (val) {
6345 /* Switch to instance "0" for the Set Advertising setting.
6346 * We cannot use update_[adv|scan_rsp]_data() here as the
6347 * HCI_ADVERTISING flag is not yet set.
6348 */
6349 hdev->cur_adv_instance = 0x00;
6351 if (ext_adv_capable(hdev)) {
6352 hci_start_ext_adv_sync(hdev, 0x00);
6353 } else {
6354 hci_update_adv_data_sync(hdev, 0x00);
6355 hci_update_scan_rsp_data_sync(hdev, 0x00);
6356 hci_enable_advertising_sync(hdev);
6358 } else {
6359 hci_disable_advertising_sync(hdev);
6362 return 0;
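/* Handler for MGMT_OP_SET_ADVERTISING. A value of 0x02 requests
 * connectable advertising. When the controller is powered off, already in
 * the requested state, in mesh mode, has LE connections or is actively
 * scanning, only the setting flags are toggled and a response is sent
 * without any HCI traffic.
 */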
6365 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6366 u16 len)
6368 struct mgmt_mode *cp = data;
6369 struct mgmt_pending_cmd *cmd;
6370 u8 val, status;
6371 int err;
6373 bt_dev_dbg(hdev, "sock %p", sk);
6375 status = mgmt_le_support(hdev);
6376 if (status)
6377 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6378 status);
6380 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6381 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6382 MGMT_STATUS_INVALID_PARAMS);
6384 if (hdev->advertising_paused)
6385 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6386 MGMT_STATUS_BUSY);
6388 hci_dev_lock(hdev);
6390 val = !!cp->val;
6392 /* The following conditions are ones which mean that we should
6393 * not do any HCI communication but directly send a mgmt
6394 * response to user space (after toggling the flag if
6395 * necessary).
6396 */
6397 if (!hdev_is_powered(hdev) ||
6398 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6399 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6400 hci_dev_test_flag(hdev, HCI_MESH) ||
6401 hci_conn_num(hdev, LE_LINK) > 0 ||
6402 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6403 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6404 bool changed;
6406 if (cp->val) {
6407 hdev->cur_adv_instance = 0x00;
6408 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6409 if (cp->val == 0x02)
6410 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6411 else
6412 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6413 } else {
6414 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6415 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6418 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6419 if (err < 0)
6420 goto unlock;
6422 if (changed)
6423 err = new_settings(hdev, sk);
6425 goto unlock;
6428 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6429 pending_find(MGMT_OP_SET_LE, hdev)) {
6430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6431 MGMT_STATUS_BUSY);
6432 goto unlock;
6435 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6436 if (!cmd)
6437 err = -ENOMEM;
6438 else
6439 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6440 set_advertising_complete);
6442 if (err < 0 && cmd)
6443 mgmt_pending_remove(cmd);
6445 unlock:
6446 hci_dev_unlock(hdev);
6447 return err;
6450 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6451 void *data, u16 len)
6453 struct mgmt_cp_set_static_address *cp = data;
6454 int err;
6456 bt_dev_dbg(hdev, "sock %p", sk);
6458 if (!lmp_le_capable(hdev))
6459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6460 MGMT_STATUS_NOT_SUPPORTED);
6462 if (hdev_is_powered(hdev))
6463 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6464 MGMT_STATUS_REJECTED);
6466 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6467 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6468 return mgmt_cmd_status(sk, hdev->id,
6469 MGMT_OP_SET_STATIC_ADDRESS,
6470 MGMT_STATUS_INVALID_PARAMS);
6472 /* Two most significant bits shall be set */
6473 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6474 return mgmt_cmd_status(sk, hdev->id,
6475 MGMT_OP_SET_STATIC_ADDRESS,
6476 MGMT_STATUS_INVALID_PARAMS);
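/* For illustration: C0:12:34:56:78:9A is a valid static random
* address, since bdaddr_t is stored little-endian and the most
* significant byte b[5] is 0xc0, i.e. both top bits are set.
*/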
6479 hci_dev_lock(hdev);
6481 bacpy(&hdev->static_addr, &cp->bdaddr);
6483 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6484 if (err < 0)
6485 goto unlock;
6487 err = new_settings(hdev, sk);
6489 unlock:
6490 hci_dev_unlock(hdev);
6491 return err;
6494 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6495 void *data, u16 len)
6497 struct mgmt_cp_set_scan_params *cp = data;
6498 __u16 interval, window;
6499 int err;
6501 bt_dev_dbg(hdev, "sock %p", sk);
6503 if (!lmp_le_capable(hdev))
6504 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6505 MGMT_STATUS_NOT_SUPPORTED);
6507 interval = __le16_to_cpu(cp->interval);
6509 if (interval < 0x0004 || interval > 0x4000)
6510 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6511 MGMT_STATUS_INVALID_PARAMS);
6513 window = __le16_to_cpu(cp->window);
6515 if (window < 0x0004 || window > 0x4000)
6516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6517 MGMT_STATUS_INVALID_PARAMS);
6519 if (window > interval)
6520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6521 MGMT_STATUS_INVALID_PARAMS);
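/* Both values are in units of 0.625 ms, so the accepted range
* 0x0004-0x4000 corresponds to 2.5 ms - 10.24 s. A sketch of a
* request for a 60 ms interval with a 30 ms window (illustrative
* values):
*
*	struct mgmt_cp_set_scan_params cp = {
*		.interval = cpu_to_le16(0x0060),	(96 * 0.625 = 60 ms)
*		.window = cpu_to_le16(0x0030),	(48 * 0.625 = 30 ms)
*	};
*/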
6523 hci_dev_lock(hdev);
6525 hdev->le_scan_interval = interval;
6526 hdev->le_scan_window = window;
6528 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6529 NULL, 0);
6531 /* If background scan is running, restart it so new parameters are
6532 * loaded.
6533 */
6534 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6535 hdev->discovery.state == DISCOVERY_STOPPED)
6536 hci_update_passive_scan(hdev);
6538 hci_dev_unlock(hdev);
6540 return err;
6543 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6545 struct mgmt_pending_cmd *cmd = data;
6547 bt_dev_dbg(hdev, "err %d", err);
6549 if (err) {
6550 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6551 mgmt_status(err));
6552 } else {
6553 struct mgmt_mode *cp = cmd->param;
6555 if (cp->val)
6556 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6557 else
6558 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6560 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6561 new_settings(hdev, cmd->sk);
6564 mgmt_pending_free(cmd);
6567 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6569 struct mgmt_pending_cmd *cmd = data;
6570 struct mgmt_mode *cp = cmd->param;
6572 return hci_write_fast_connectable_sync(hdev, cp->val);
6575 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6576 void *data, u16 len)
6578 struct mgmt_mode *cp = data;
6579 struct mgmt_pending_cmd *cmd;
6580 int err;
6582 bt_dev_dbg(hdev, "sock %p", sk);
6584 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6585 hdev->hci_ver < BLUETOOTH_VER_1_2)
6586 return mgmt_cmd_status(sk, hdev->id,
6587 MGMT_OP_SET_FAST_CONNECTABLE,
6588 MGMT_STATUS_NOT_SUPPORTED);
6590 if (cp->val != 0x00 && cp->val != 0x01)
6591 return mgmt_cmd_status(sk, hdev->id,
6592 MGMT_OP_SET_FAST_CONNECTABLE,
6593 MGMT_STATUS_INVALID_PARAMS);
6595 hci_dev_lock(hdev);
6597 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6598 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6599 goto unlock;
6602 if (!hdev_is_powered(hdev)) {
6603 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6604 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6605 new_settings(hdev, sk);
6606 goto unlock;
6609 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6610 len);
6611 if (!cmd)
6612 err = -ENOMEM;
6613 else
6614 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6615 fast_connectable_complete);
6617 if (err < 0) {
6618 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6619 MGMT_STATUS_FAILED);
6621 if (cmd)
6622 mgmt_pending_free(cmd);
6625 unlock:
6626 hci_dev_unlock(hdev);
6628 return err;
6631 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6633 struct mgmt_pending_cmd *cmd = data;
6635 bt_dev_dbg(hdev, "err %d", err);
6637 if (err) {
6638 u8 mgmt_err = mgmt_status(err);
6640 /* We need to restore the flag if related HCI commands
6641 * failed.
6642 */
6643 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6645 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6646 } else {
6647 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6648 new_settings(hdev, cmd->sk);
6651 mgmt_pending_free(cmd);
6654 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6656 int status;
6658 status = hci_write_fast_connectable_sync(hdev, false);
6660 if (!status)
6661 status = hci_update_scan_sync(hdev);
6663 /* Since only the advertising data flags will change, there
6664 * is no need to update the scan response data.
6665 */
6666 if (!status)
6667 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6669 return status;
6672 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6674 struct mgmt_mode *cp = data;
6675 struct mgmt_pending_cmd *cmd;
6676 int err;
6678 bt_dev_dbg(hdev, "sock %p", sk);
6680 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6681 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6682 MGMT_STATUS_NOT_SUPPORTED);
6684 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6686 MGMT_STATUS_REJECTED);
6688 if (cp->val != 0x00 && cp->val != 0x01)
6689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6690 MGMT_STATUS_INVALID_PARAMS);
6692 hci_dev_lock(hdev);
6694 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6695 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6696 goto unlock;
6699 if (!hdev_is_powered(hdev)) {
6700 if (!cp->val) {
6701 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6702 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6703 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6704 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6707 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6709 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6710 if (err < 0)
6711 goto unlock;
6713 err = new_settings(hdev, sk);
6714 goto unlock;
6717 /* Reject disabling when powered on */
6718 if (!cp->val) {
6719 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6720 MGMT_STATUS_REJECTED);
6721 goto unlock;
6722 } else {
6723 /* When a dual-mode controller is configured to operate with LE
6724 * only and uses a static address, switching BR/EDR back on is
6725 * not allowed.
6726 *
6727 * Dual-mode controllers shall operate with the public
6728 * address as their identity address for BR/EDR and LE. So
6729 * reject the attempt to create an invalid configuration.
6730 *
6731 * The same restriction applies when Secure Connections
6732 * has been enabled. For BR/EDR this is a controller feature
6733 * while for LE it is a host stack feature. This means that
6734 * switching BR/EDR back on when Secure Connections has been
6735 * enabled is not a supported transaction.
6736 */
6737 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6738 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6739 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6741 MGMT_STATUS_REJECTED);
6742 goto unlock;
6743 }
6744 }
6746 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6747 if (!cmd)
6748 err = -ENOMEM;
6749 else
6750 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6751 set_bredr_complete);
6753 if (err < 0) {
6754 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6755 MGMT_STATUS_FAILED);
6756 if (cmd)
6757 mgmt_pending_free(cmd);
6759 goto unlock;
6762 /* We need to flip the bit already here so that
6763 * hci_req_update_adv_data generates the correct flags.
6764 */
6765 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6767 unlock:
6768 hci_dev_unlock(hdev);
6769 return err;
6772 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6774 struct mgmt_pending_cmd *cmd = data;
6775 struct mgmt_mode *cp;
6777 bt_dev_dbg(hdev, "err %d", err);
6779 if (err) {
6780 u8 mgmt_err = mgmt_status(err);
6782 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6783 goto done;
6786 cp = cmd->param;
6788 switch (cp->val) {
6789 case 0x00:
6790 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6791 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6792 break;
6793 case 0x01:
6794 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6795 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6796 break;
6797 case 0x02:
6798 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6799 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6800 break;
6801 }
6803 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6804 new_settings(hdev, cmd->sk);
6806 done:
6807 mgmt_pending_free(cmd);
6810 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6812 struct mgmt_pending_cmd *cmd = data;
6813 struct mgmt_mode *cp = cmd->param;
6814 u8 val = !!cp->val;
6816 /* Force write of val */
6817 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6819 return hci_write_sc_support_sync(hdev, val);
6822 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6823 void *data, u16 len)
6825 struct mgmt_mode *cp = data;
6826 struct mgmt_pending_cmd *cmd;
6827 u8 val;
6828 int err;
6830 bt_dev_dbg(hdev, "sock %p", sk);
6832 if (!lmp_sc_capable(hdev) &&
6833 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6835 MGMT_STATUS_NOT_SUPPORTED);
6837 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6838 lmp_sc_capable(hdev) &&
6839 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6841 MGMT_STATUS_REJECTED);
6843 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6845 MGMT_STATUS_INVALID_PARAMS);
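/* Per the mgmt API, 0x00 disables Secure Connections, 0x01 enables
* it and 0x02 selects SC Only mode; see set_secure_conn_complete()
* above for the mapping onto HCI_SC_ENABLED/HCI_SC_ONLY.
*/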
6847 hci_dev_lock(hdev);
6849 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6850 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6851 bool changed;
6853 if (cp->val) {
6854 changed = !hci_dev_test_and_set_flag(hdev,
6855 HCI_SC_ENABLED);
6856 if (cp->val == 0x02)
6857 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6858 else
6859 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6860 } else {
6861 changed = hci_dev_test_and_clear_flag(hdev,
6862 HCI_SC_ENABLED);
6863 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6866 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6867 if (err < 0)
6868 goto failed;
6870 if (changed)
6871 err = new_settings(hdev, sk);
6873 goto failed;
6874 }
6876 val = !!cp->val;
6878 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6879 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6880 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6881 goto failed;
6884 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6885 if (!cmd)
6886 err = -ENOMEM;
6887 else
6888 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6889 set_secure_conn_complete);
6891 if (err < 0) {
6892 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6893 MGMT_STATUS_FAILED);
6894 if (cmd)
6895 mgmt_pending_free(cmd);
6896 }
6898 failed:
6899 hci_dev_unlock(hdev);
6900 return err;
6903 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6904 void *data, u16 len)
6906 struct mgmt_mode *cp = data;
6907 bool changed, use_changed;
6908 int err;
6910 bt_dev_dbg(hdev, "sock %p", sk);
6912 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6914 MGMT_STATUS_INVALID_PARAMS);
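/* Per the mgmt API, 0x00 means debug keys are not retained, 0x01
* keeps them (HCI_KEEP_DEBUG_KEYS) and 0x02 additionally uses debug
* keys for local pairing (HCI_USE_DEBUG_KEYS).
*/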
6916 hci_dev_lock(hdev);
6918 if (cp->val)
6919 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6920 else
6921 changed = hci_dev_test_and_clear_flag(hdev,
6922 HCI_KEEP_DEBUG_KEYS);
6924 if (cp->val == 0x02)
6925 use_changed = !hci_dev_test_and_set_flag(hdev,
6926 HCI_USE_DEBUG_KEYS);
6927 else
6928 use_changed = hci_dev_test_and_clear_flag(hdev,
6929 HCI_USE_DEBUG_KEYS);
6931 if (hdev_is_powered(hdev) && use_changed &&
6932 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6933 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6934 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6935 sizeof(mode), &mode);
6936 }
6938 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6939 if (err < 0)
6940 goto unlock;
6942 if (changed)
6943 err = new_settings(hdev, sk);
6945 unlock:
6946 hci_dev_unlock(hdev);
6947 return err;
6950 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6951 u16 len)
6953 struct mgmt_cp_set_privacy *cp = cp_data;
6954 bool changed;
6955 int err;
6957 bt_dev_dbg(hdev, "sock %p", sk);
6959 if (!lmp_le_capable(hdev))
6960 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6961 MGMT_STATUS_NOT_SUPPORTED);
6963 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6965 MGMT_STATUS_INVALID_PARAMS);
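/* Per the mgmt API, 0x01 enables privacy (advertise and connect
* using an RPA derived from the supplied IRK) while 0x02 selects
* limited privacy (HCI_LIMITED_PRIVACY), where the identity address
* may still be used, e.g. while the device is discoverable.
*/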
6967 if (hdev_is_powered(hdev))
6968 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6969 MGMT_STATUS_REJECTED);
6971 hci_dev_lock(hdev);
6973 /* If user space supports this command it is also expected to
6974 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6975 */
6976 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6978 if (cp->privacy) {
6979 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6980 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6981 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6982 hci_adv_instances_set_rpa_expired(hdev, true);
6983 if (cp->privacy == 0x02)
6984 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6985 else
6986 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6987 } else {
6988 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6989 memset(hdev->irk, 0, sizeof(hdev->irk));
6990 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6991 hci_adv_instances_set_rpa_expired(hdev, false);
6992 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6993 }
6995 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6996 if (err < 0)
6997 goto unlock;
6999 if (changed)
7000 err = new_settings(hdev, sk);
7002 unlock:
7003 hci_dev_unlock(hdev);
7004 return err;
7007 static bool irk_is_valid(struct mgmt_irk_info *irk)
7009 switch (irk->addr.type) {
7010 case BDADDR_LE_PUBLIC:
7011 return true;
7013 case BDADDR_LE_RANDOM:
7014 /* Two most significant bits shall be set */
7015 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7016 return false;
7017 return true;
7018 }
7020 return false;
7023 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7024 u16 len)
7026 struct mgmt_cp_load_irks *cp = cp_data;
7027 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7028 sizeof(struct mgmt_irk_info));
7029 u16 irk_count, expected_len;
7030 int i, err;
7032 bt_dev_dbg(hdev, "sock %p", sk);
7034 if (!lmp_le_capable(hdev))
7035 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7036 MGMT_STATUS_NOT_SUPPORTED);
7038 irk_count = __le16_to_cpu(cp->irk_count);
7039 if (irk_count > max_irk_count) {
7040 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7041 irk_count);
7042 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7043 MGMT_STATUS_INVALID_PARAMS);
7046 expected_len = struct_size(cp, irks, irk_count);
7047 if (expected_len != len) {
7048 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7049 expected_len, len);
7050 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7051 MGMT_STATUS_INVALID_PARAMS);
7054 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7056 for (i = 0; i < irk_count; i++) {
7057 struct mgmt_irk_info *key = &cp->irks[i];
7059 if (!irk_is_valid(key))
7060 return mgmt_cmd_status(sk, hdev->id,
7061 MGMT_OP_LOAD_IRKS,
7062 MGMT_STATUS_INVALID_PARAMS);
7065 hci_dev_lock(hdev);
7067 hci_smp_irks_clear(hdev);
7069 for (i = 0; i < irk_count; i++) {
7070 struct mgmt_irk_info *irk = &cp->irks[i];
7071 u8 addr_type = le_addr_type(irk->addr.type);
7073 if (hci_is_blocked_key(hdev,
7074 HCI_BLOCKED_KEY_TYPE_IRK,
7075 irk->val)) {
7076 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7077 &irk->addr.bdaddr);
7078 continue;
7081 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7082 if (irk->addr.type == BDADDR_BREDR)
7083 addr_type = BDADDR_BREDR;
7085 hci_add_irk(hdev, &irk->addr.bdaddr,
7086 addr_type, irk->val,
7087 BDADDR_ANY);
7090 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7094 hci_dev_unlock(hdev);
7096 return err;
7099 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7101 if (key->initiator != 0x00 && key->initiator != 0x01)
7102 return false;
7104 switch (key->addr.type) {
7105 case BDADDR_LE_PUBLIC:
7106 return true;
7108 case BDADDR_LE_RANDOM:
7109 /* Two most significant bits shall be set */
7110 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7111 return false;
7112 return true;
7113 }
7115 return false;
7118 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7119 void *cp_data, u16 len)
7121 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7122 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7123 sizeof(struct mgmt_ltk_info));
7124 u16 key_count, expected_len;
7125 int i, err;
7127 bt_dev_dbg(hdev, "sock %p", sk);
7129 if (!lmp_le_capable(hdev))
7130 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7131 MGMT_STATUS_NOT_SUPPORTED);
7133 key_count = __le16_to_cpu(cp->key_count);
7134 if (key_count > max_key_count) {
7135 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7136 key_count);
7137 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7138 MGMT_STATUS_INVALID_PARAMS);
7141 expected_len = struct_size(cp, keys, key_count);
7142 if (expected_len != len) {
7143 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7144 expected_len, len);
7145 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7146 MGMT_STATUS_INVALID_PARAMS);
7149 bt_dev_dbg(hdev, "key_count %u", key_count);
7151 for (i = 0; i < key_count; i++) {
7152 struct mgmt_ltk_info *key = &cp->keys[i];
7154 if (!ltk_is_valid(key))
7155 return mgmt_cmd_status(sk, hdev->id,
7156 MGMT_OP_LOAD_LONG_TERM_KEYS,
7157 MGMT_STATUS_INVALID_PARAMS);
7160 hci_dev_lock(hdev);
7162 hci_smp_ltks_clear(hdev);
7164 for (i = 0; i < key_count; i++) {
7165 struct mgmt_ltk_info *key = &cp->keys[i];
7166 u8 type, authenticated;
7167 u8 addr_type = le_addr_type(key->addr.type);
7169 if (hci_is_blocked_key(hdev,
7170 HCI_BLOCKED_KEY_TYPE_LTK,
7171 key->val)) {
7172 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7173 &key->addr.bdaddr);
7174 continue;
7177 switch (key->type) {
7178 case MGMT_LTK_UNAUTHENTICATED:
7179 authenticated = 0x00;
7180 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7181 break;
7182 case MGMT_LTK_AUTHENTICATED:
7183 authenticated = 0x01;
7184 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7185 break;
7186 case MGMT_LTK_P256_UNAUTH:
7187 authenticated = 0x00;
7188 type = SMP_LTK_P256;
7189 break;
7190 case MGMT_LTK_P256_AUTH:
7191 authenticated = 0x01;
7192 type = SMP_LTK_P256;
7193 break;
7194 case MGMT_LTK_P256_DEBUG:
7195 authenticated = 0x00;
7196 type = SMP_LTK_P256_DEBUG;
7197 fallthrough;
7198 default:
7199 continue;
7200 }
7202 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7203 if (key->addr.type == BDADDR_BREDR)
7204 addr_type = BDADDR_BREDR;
7206 hci_add_ltk(hdev, &key->addr.bdaddr,
7207 addr_type, type, authenticated,
7208 key->val, key->enc_size, key->ediv, key->rand);
7211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7212 NULL, 0);
7214 hci_dev_unlock(hdev);
7216 return err;
7219 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7221 struct mgmt_pending_cmd *cmd = data;
7222 struct hci_conn *conn = cmd->user_data;
7223 struct mgmt_cp_get_conn_info *cp = cmd->param;
7224 struct mgmt_rp_get_conn_info rp;
7225 u8 status;
7227 bt_dev_dbg(hdev, "err %d", err);
7229 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7231 status = mgmt_status(err);
7232 if (status == MGMT_STATUS_SUCCESS) {
7233 rp.rssi = conn->rssi;
7234 rp.tx_power = conn->tx_power;
7235 rp.max_tx_power = conn->max_tx_power;
7236 } else {
7237 rp.rssi = HCI_RSSI_INVALID;
7238 rp.tx_power = HCI_TX_POWER_INVALID;
7239 rp.max_tx_power = HCI_TX_POWER_INVALID;
7242 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7243 &rp, sizeof(rp));
7245 mgmt_pending_free(cmd);
7248 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7250 struct mgmt_pending_cmd *cmd = data;
7251 struct mgmt_cp_get_conn_info *cp = cmd->param;
7252 struct hci_conn *conn;
7253 int err;
7254 __le16 handle;
7256 /* Make sure we are still connected */
7257 if (cp->addr.type == BDADDR_BREDR)
7258 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7259 &cp->addr.bdaddr);
7260 else
7261 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7263 if (!conn || conn->state != BT_CONNECTED)
7264 return MGMT_STATUS_NOT_CONNECTED;
7266 cmd->user_data = conn;
7267 handle = cpu_to_le16(conn->handle);
7269 /* Refresh RSSI each time */
7270 err = hci_read_rssi_sync(hdev, handle);
7272 /* For LE links the TX power does not change, so we don't need
7273 * to query for it again once the value is known.
7274 */
7275 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7276 conn->tx_power == HCI_TX_POWER_INVALID))
7277 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7279 /* Max TX power needs to be read only once per connection */
7280 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7281 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7283 return err;
7286 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7287 u16 len)
7289 struct mgmt_cp_get_conn_info *cp = data;
7290 struct mgmt_rp_get_conn_info rp;
7291 struct hci_conn *conn;
7292 unsigned long conn_info_age;
7293 int err = 0;
7295 bt_dev_dbg(hdev, "sock %p", sk);
7297 memset(&rp, 0, sizeof(rp));
7298 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7299 rp.addr.type = cp->addr.type;
7301 if (!bdaddr_type_is_valid(cp->addr.type))
7302 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7303 MGMT_STATUS_INVALID_PARAMS,
7304 &rp, sizeof(rp));
7306 hci_dev_lock(hdev);
7308 if (!hdev_is_powered(hdev)) {
7309 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7310 MGMT_STATUS_NOT_POWERED, &rp,
7311 sizeof(rp));
7312 goto unlock;
7315 if (cp->addr.type == BDADDR_BREDR)
7316 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7317 &cp->addr.bdaddr);
7318 else
7319 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7321 if (!conn || conn->state != BT_CONNECTED) {
7322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7323 MGMT_STATUS_NOT_CONNECTED, &rp,
7324 sizeof(rp));
7325 goto unlock;
7328 /* To avoid the client trying to guess when to poll again, calculate
7329 * the conn info age as a random value between the min/max set in hdev.
7330 */
7331 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7332 hdev->conn_info_max_age - 1);
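/* For example, with the default conn_info_min_age/max_age of
* 1000/3000 ms this picks a cache lifetime uniformly from the
* range [1000, 2999] ms.
*/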
7334 /* Query controller to refresh cached values if they are too old or were
7335 * never read.
7336 */
7337 if (time_after(jiffies, conn->conn_info_timestamp +
7338 msecs_to_jiffies(conn_info_age)) ||
7339 !conn->conn_info_timestamp) {
7340 struct mgmt_pending_cmd *cmd;
7342 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7343 len);
7344 if (!cmd) {
7345 err = -ENOMEM;
7346 } else {
7347 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7348 cmd, get_conn_info_complete);
7351 if (err < 0) {
7352 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7353 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7355 if (cmd)
7356 mgmt_pending_free(cmd);
7358 goto unlock;
7361 conn->conn_info_timestamp = jiffies;
7362 } else {
7363 /* Cache is valid, just reply with values cached in hci_conn */
7364 rp.rssi = conn->rssi;
7365 rp.tx_power = conn->tx_power;
7366 rp.max_tx_power = conn->max_tx_power;
7368 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7369 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7372 unlock:
7373 hci_dev_unlock(hdev);
7374 return err;
7377 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7379 struct mgmt_pending_cmd *cmd = data;
7380 struct mgmt_cp_get_clock_info *cp = cmd->param;
7381 struct mgmt_rp_get_clock_info rp;
7382 struct hci_conn *conn = cmd->user_data;
7383 u8 status = mgmt_status(err);
7385 bt_dev_dbg(hdev, "err %d", err);
7387 memset(&rp, 0, sizeof(rp));
7388 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7389 rp.addr.type = cp->addr.type;
7391 if (err)
7392 goto complete;
7394 rp.local_clock = cpu_to_le32(hdev->clock);
7396 if (conn) {
7397 rp.piconet_clock = cpu_to_le32(conn->clock);
7398 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7401 complete:
7402 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7403 sizeof(rp));
7405 mgmt_pending_free(cmd);
7408 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7410 struct mgmt_pending_cmd *cmd = data;
7411 struct mgmt_cp_get_clock_info *cp = cmd->param;
7412 struct hci_cp_read_clock hci_cp;
7413 struct hci_conn *conn;
7415 memset(&hci_cp, 0, sizeof(hci_cp));
7416 hci_read_clock_sync(hdev, &hci_cp);
7418 /* Make sure connection still exists */
7419 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7420 if (!conn || conn->state != BT_CONNECTED)
7421 return MGMT_STATUS_NOT_CONNECTED;
7423 cmd->user_data = conn;
7424 hci_cp.handle = cpu_to_le16(conn->handle);
7425 hci_cp.which = 0x01; /* Piconet clock */
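/* Per the HCI Read Clock command, "which" 0x00 reads the local
* clock (the handle is ignored) and 0x01 reads the piconet clock of
* the given connection; the first call above therefore fetched the
* local clock from the zero-initialized parameters.
*/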
7427 return hci_read_clock_sync(hdev, &hci_cp);
7430 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7431 u16 len)
7433 struct mgmt_cp_get_clock_info *cp = data;
7434 struct mgmt_rp_get_clock_info rp;
7435 struct mgmt_pending_cmd *cmd;
7436 struct hci_conn *conn;
7437 int err;
7439 bt_dev_dbg(hdev, "sock %p", sk);
7441 memset(&rp, 0, sizeof(rp));
7442 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7443 rp.addr.type = cp->addr.type;
7445 if (cp->addr.type != BDADDR_BREDR)
7446 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7447 MGMT_STATUS_INVALID_PARAMS,
7448 &rp, sizeof(rp));
7450 hci_dev_lock(hdev);
7452 if (!hdev_is_powered(hdev)) {
7453 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7454 MGMT_STATUS_NOT_POWERED, &rp,
7455 sizeof(rp));
7456 goto unlock;
7459 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7460 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7461 &cp->addr.bdaddr);
7462 if (!conn || conn->state != BT_CONNECTED) {
7463 err = mgmt_cmd_complete(sk, hdev->id,
7464 MGMT_OP_GET_CLOCK_INFO,
7465 MGMT_STATUS_NOT_CONNECTED,
7466 &rp, sizeof(rp));
7467 goto unlock;
7468 }
7469 } else {
7470 conn = NULL;
7473 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7474 if (!cmd)
7475 err = -ENOMEM;
7476 else
7477 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7478 get_clock_info_complete);
7480 if (err < 0) {
7481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7482 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7484 if (cmd)
7485 mgmt_pending_free(cmd);
7489 unlock:
7490 hci_dev_unlock(hdev);
7491 return err;
7494 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7496 struct hci_conn *conn;
7498 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7499 if (!conn)
7500 return false;
7502 if (conn->dst_type != type)
7503 return false;
7505 if (conn->state != BT_CONNECTED)
7506 return false;
7508 return true;
7511 /* This function requires the caller holds hdev->lock */
7512 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7513 u8 addr_type, u8 auto_connect)
7515 struct hci_conn_params *params;
7517 params = hci_conn_params_add(hdev, addr, addr_type);
7518 if (!params)
7519 return -EIO;
7521 if (params->auto_connect == auto_connect)
7522 return 0;
7524 hci_pend_le_list_del_init(params);
7526 switch (auto_connect) {
7527 case HCI_AUTO_CONN_DISABLED:
7528 case HCI_AUTO_CONN_LINK_LOSS:
7529 /* If auto connect is being disabled while we're trying to
7530 * connect to the device, keep connecting.
7531 */
7532 if (params->explicit_connect)
7533 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7534 break;
7535 case HCI_AUTO_CONN_REPORT:
7536 if (params->explicit_connect)
7537 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7538 else
7539 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7540 break;
7541 case HCI_AUTO_CONN_DIRECT:
7542 case HCI_AUTO_CONN_ALWAYS:
7543 if (!is_connected(hdev, addr, addr_type))
7544 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7545 break;
7546 }
7548 params->auto_connect = auto_connect;
7550 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7551 addr, addr_type, auto_connect);
7553 return 0;
7556 static void device_added(struct sock *sk, struct hci_dev *hdev,
7557 bdaddr_t *bdaddr, u8 type, u8 action)
7559 struct mgmt_ev_device_added ev;
7561 bacpy(&ev.addr.bdaddr, bdaddr);
7562 ev.addr.type = type;
7563 ev.action = action;
7565 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7568 static int add_device_sync(struct hci_dev *hdev, void *data)
7570 return hci_update_passive_scan_sync(hdev);
7573 static int add_device(struct sock *sk, struct hci_dev *hdev,
7574 void *data, u16 len)
7576 struct mgmt_cp_add_device *cp = data;
7577 u8 auto_conn, addr_type;
7578 struct hci_conn_params *params;
7579 int err;
7580 u32 current_flags = 0;
7581 u32 supported_flags;
7583 bt_dev_dbg(hdev, "sock %p", sk);
7585 if (!bdaddr_type_is_valid(cp->addr.type) ||
7586 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7587 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7588 MGMT_STATUS_INVALID_PARAMS,
7589 &cp->addr, sizeof(cp->addr));
7591 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7592 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7593 MGMT_STATUS_INVALID_PARAMS,
7594 &cp->addr, sizeof(cp->addr));
7596 hci_dev_lock(hdev);
7598 if (cp->addr.type == BDADDR_BREDR) {
7599 /* Only incoming connections action is supported for now */
7600 if (cp->action != 0x01) {
7601 err = mgmt_cmd_complete(sk, hdev->id,
7602 MGMT_OP_ADD_DEVICE,
7603 MGMT_STATUS_INVALID_PARAMS,
7604 &cp->addr, sizeof(cp->addr));
7605 goto unlock;
7608 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7609 &cp->addr.bdaddr,
7610 cp->addr.type, 0);
7611 if (err)
7612 goto unlock;
7614 hci_update_scan(hdev);
7616 goto added;
7619 addr_type = le_addr_type(cp->addr.type);
7621 if (cp->action == 0x02)
7622 auto_conn = HCI_AUTO_CONN_ALWAYS;
7623 else if (cp->action == 0x01)
7624 auto_conn = HCI_AUTO_CONN_DIRECT;
7625 else
7626 auto_conn = HCI_AUTO_CONN_REPORT;
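/* I.e. action 0x00 (background scan) maps to HCI_AUTO_CONN_REPORT,
* 0x01 (allow incoming connection) to HCI_AUTO_CONN_DIRECT, which
* only reacts to directed advertising, and 0x02 (auto-connect) to
* HCI_AUTO_CONN_ALWAYS.
*/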
7628 /* Kernel internally uses conn_params with resolvable private
7629 * address, but Add Device allows only identity addresses.
7630 * Make sure it is enforced before calling
7631 * hci_conn_params_lookup.
7632 */
7633 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7634 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7635 MGMT_STATUS_INVALID_PARAMS,
7636 &cp->addr, sizeof(cp->addr));
7637 goto unlock;
7640 /* If the connection parameters don't exist for this device,
7641 * they will be created and configured with defaults.
7642 */
7643 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7644 auto_conn) < 0) {
7645 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7646 MGMT_STATUS_FAILED, &cp->addr,
7647 sizeof(cp->addr));
7648 goto unlock;
7649 } else {
7650 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7651 addr_type);
7652 if (params)
7653 current_flags = params->flags;
7656 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7657 if (err < 0)
7658 goto unlock;
7660 added:
7661 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7662 supported_flags = hdev->conn_flags;
7663 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7664 supported_flags, current_flags);
7666 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7667 MGMT_STATUS_SUCCESS, &cp->addr,
7668 sizeof(cp->addr));
7670 unlock:
7671 hci_dev_unlock(hdev);
7672 return err;
7675 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7676 bdaddr_t *bdaddr, u8 type)
7678 struct mgmt_ev_device_removed ev;
7680 bacpy(&ev.addr.bdaddr, bdaddr);
7681 ev.addr.type = type;
7683 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7686 static int remove_device_sync(struct hci_dev *hdev, void *data)
7688 return hci_update_passive_scan_sync(hdev);
7691 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7692 void *data, u16 len)
7694 struct mgmt_cp_remove_device *cp = data;
7695 int err;
7697 bt_dev_dbg(hdev, "sock %p", sk);
7699 hci_dev_lock(hdev);
7701 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7702 struct hci_conn_params *params;
7703 u8 addr_type;
7705 if (!bdaddr_type_is_valid(cp->addr.type)) {
7706 err = mgmt_cmd_complete(sk, hdev->id,
7707 MGMT_OP_REMOVE_DEVICE,
7708 MGMT_STATUS_INVALID_PARAMS,
7709 &cp->addr, sizeof(cp->addr));
7710 goto unlock;
7713 if (cp->addr.type == BDADDR_BREDR) {
7714 err = hci_bdaddr_list_del(&hdev->accept_list,
7715 &cp->addr.bdaddr,
7716 cp->addr.type);
7717 if (err) {
7718 err = mgmt_cmd_complete(sk, hdev->id,
7719 MGMT_OP_REMOVE_DEVICE,
7720 MGMT_STATUS_INVALID_PARAMS,
7721 &cp->addr,
7722 sizeof(cp->addr));
7723 goto unlock;
7726 hci_update_scan(hdev);
7728 device_removed(sk, hdev, &cp->addr.bdaddr,
7729 cp->addr.type);
7730 goto complete;
7733 addr_type = le_addr_type(cp->addr.type);
7735 /* Kernel internally uses conn_params with resolvable private
7736 * address, but Remove Device allows only identity addresses.
7737 * Make sure it is enforced before calling
7738 * hci_conn_params_lookup.
7739 */
7740 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7741 err = mgmt_cmd_complete(sk, hdev->id,
7742 MGMT_OP_REMOVE_DEVICE,
7743 MGMT_STATUS_INVALID_PARAMS,
7744 &cp->addr, sizeof(cp->addr));
7745 goto unlock;
7748 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7749 addr_type);
7750 if (!params) {
7751 err = mgmt_cmd_complete(sk, hdev->id,
7752 MGMT_OP_REMOVE_DEVICE,
7753 MGMT_STATUS_INVALID_PARAMS,
7754 &cp->addr, sizeof(cp->addr));
7755 goto unlock;
7758 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7759 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7760 err = mgmt_cmd_complete(sk, hdev->id,
7761 MGMT_OP_REMOVE_DEVICE,
7762 MGMT_STATUS_INVALID_PARAMS,
7763 &cp->addr, sizeof(cp->addr));
7764 goto unlock;
7767 hci_conn_params_free(params);
7769 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7770 } else {
7771 struct hci_conn_params *p, *tmp;
7772 struct bdaddr_list *b, *btmp;
7774 if (cp->addr.type) {
7775 err = mgmt_cmd_complete(sk, hdev->id,
7776 MGMT_OP_REMOVE_DEVICE,
7777 MGMT_STATUS_INVALID_PARAMS,
7778 &cp->addr, sizeof(cp->addr));
7779 goto unlock;
7782 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7783 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7784 list_del(&b->list);
7785 kfree(b);
7788 hci_update_scan(hdev);
7790 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7791 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7792 continue;
7793 device_removed(sk, hdev, &p->addr, p->addr_type);
7794 if (p->explicit_connect) {
7795 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7796 continue;
7798 hci_conn_params_free(p);
7801 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7804 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7806 complete:
7807 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7808 MGMT_STATUS_SUCCESS, &cp->addr,
7809 sizeof(cp->addr));
7810 unlock:
7811 hci_dev_unlock(hdev);
7812 return err;
7815 static int conn_update_sync(struct hci_dev *hdev, void *data)
7817 struct hci_conn_params *params = data;
7818 struct hci_conn *conn;
7820 conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7821 if (!conn)
7822 return -ECANCELED;
7824 return hci_le_conn_update_sync(hdev, conn, params);
7827 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7828 u16 len)
7830 struct mgmt_cp_load_conn_param *cp = data;
7831 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7832 sizeof(struct mgmt_conn_param));
7833 u16 param_count, expected_len;
7834 int i;
7836 if (!lmp_le_capable(hdev))
7837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7838 MGMT_STATUS_NOT_SUPPORTED);
7840 param_count = __le16_to_cpu(cp->param_count);
7841 if (param_count > max_param_count) {
7842 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7843 param_count);
7844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7845 MGMT_STATUS_INVALID_PARAMS);
7848 expected_len = struct_size(cp, params, param_count);
7849 if (expected_len != len) {
7850 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7851 expected_len, len);
7852 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7853 MGMT_STATUS_INVALID_PARAMS);
7856 bt_dev_dbg(hdev, "param_count %u", param_count);
7858 hci_dev_lock(hdev);
7860 if (param_count > 1)
7861 hci_conn_params_clear_disabled(hdev);
7863 for (i = 0; i < param_count; i++) {
7864 struct mgmt_conn_param *param = &cp->params[i];
7865 struct hci_conn_params *hci_param;
7866 u16 min, max, latency, timeout;
7867 bool update = false;
7868 u8 addr_type;
7870 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7871 param->addr.type);
7873 if (param->addr.type == BDADDR_LE_PUBLIC) {
7874 addr_type = ADDR_LE_DEV_PUBLIC;
7875 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7876 addr_type = ADDR_LE_DEV_RANDOM;
7877 } else {
7878 bt_dev_err(hdev, "ignoring invalid connection parameters");
7879 continue;
7882 min = le16_to_cpu(param->min_interval);
7883 max = le16_to_cpu(param->max_interval);
7884 latency = le16_to_cpu(param->latency);
7885 timeout = le16_to_cpu(param->timeout);
7887 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7888 min, max, latency, timeout);
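/* Per the LE Connection Update command, min/max are in units of
* 1.25 ms, the latency is a number of connection events and the
* supervision timeout is in units of 10 ms.
*/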
7890 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7891 bt_dev_err(hdev, "ignoring invalid connection parameters");
7892 continue;
7895 /* Detect when the load is for an existing parameter, then
7896 * attempt to trigger the connection update procedure.
7897 */
7898 if (!i && param_count == 1) {
7899 hci_param = hci_conn_params_lookup(hdev,
7900 &param->addr.bdaddr,
7901 addr_type);
7902 if (hci_param)
7903 update = true;
7904 else
7905 hci_conn_params_clear_disabled(hdev);
7908 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7909 addr_type);
7910 if (!hci_param) {
7911 bt_dev_err(hdev, "failed to add connection parameters");
7912 continue;
7915 hci_param->conn_min_interval = min;
7916 hci_param->conn_max_interval = max;
7917 hci_param->conn_latency = latency;
7918 hci_param->supervision_timeout = timeout;
7920 /* Check if we need to trigger a connection update */
7921 if (update) {
7922 struct hci_conn *conn;
7924 /* Look up an existing connection as central and check
7925 * whether the parameters match; if they don't, trigger
7926 * a connection update.
7927 */
7928 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7929 addr_type);
7930 if (conn && conn->role == HCI_ROLE_MASTER &&
7931 (conn->le_conn_min_interval != min ||
7932 conn->le_conn_max_interval != max ||
7933 conn->le_conn_latency != latency ||
7934 conn->le_supv_timeout != timeout))
7935 hci_cmd_sync_queue(hdev, conn_update_sync,
7936 hci_param, NULL);
7940 hci_dev_unlock(hdev);
7942 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7943 NULL, 0);
7946 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7947 void *data, u16 len)
7949 struct mgmt_cp_set_external_config *cp = data;
7950 bool changed;
7951 int err;
7953 bt_dev_dbg(hdev, "sock %p", sk);
7955 if (hdev_is_powered(hdev))
7956 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7957 MGMT_STATUS_REJECTED);
7959 if (cp->config != 0x00 && cp->config != 0x01)
7960 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7961 MGMT_STATUS_INVALID_PARAMS);
7963 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7965 MGMT_STATUS_NOT_SUPPORTED);
7967 hci_dev_lock(hdev);
7969 if (cp->config)
7970 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7971 else
7972 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7974 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7975 if (err < 0)
7976 goto unlock;
7978 if (!changed)
7979 goto unlock;
7981 err = new_options(hdev, sk);
7983 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7984 mgmt_index_removed(hdev);
7986 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7987 hci_dev_set_flag(hdev, HCI_CONFIG);
7988 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7990 queue_work(hdev->req_workqueue, &hdev->power_on);
7991 } else {
7992 set_bit(HCI_RAW, &hdev->flags);
7993 mgmt_index_added(hdev);
7994 }
7995 }
7997 unlock:
7998 hci_dev_unlock(hdev);
7999 return err;
8002 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8003 void *data, u16 len)
8005 struct mgmt_cp_set_public_address *cp = data;
8006 bool changed;
8007 int err;
8009 bt_dev_dbg(hdev, "sock %p", sk);
8011 if (hdev_is_powered(hdev))
8012 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8013 MGMT_STATUS_REJECTED);
8015 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8016 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8017 MGMT_STATUS_INVALID_PARAMS);
8019 if (!hdev->set_bdaddr)
8020 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8021 MGMT_STATUS_NOT_SUPPORTED);
8023 hci_dev_lock(hdev);
8025 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8026 bacpy(&hdev->public_addr, &cp->bdaddr);
8028 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8029 if (err < 0)
8030 goto unlock;
8032 if (!changed)
8033 goto unlock;
8035 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8036 err = new_options(hdev, sk);
8038 if (is_configured(hdev)) {
8039 mgmt_index_removed(hdev);
8041 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8043 hci_dev_set_flag(hdev, HCI_CONFIG);
8044 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8046 queue_work(hdev->req_workqueue, &hdev->power_on);
8047 }
8049 unlock:
8050 hci_dev_unlock(hdev);
8051 return err;
8054 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8055 int err)
8057 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8058 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8059 u8 *h192, *r192, *h256, *r256;
8060 struct mgmt_pending_cmd *cmd = data;
8061 struct sk_buff *skb = cmd->skb;
8062 u8 status = mgmt_status(err);
8063 u16 eir_len;
8065 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8066 return;
8068 if (!status) {
8069 if (!skb)
8070 status = MGMT_STATUS_FAILED;
8071 else if (IS_ERR(skb))
8072 status = mgmt_status(PTR_ERR(skb));
8073 else
8074 status = mgmt_status(skb->data[0]);
8077 bt_dev_dbg(hdev, "status %u", status);
8079 mgmt_cp = cmd->param;
8081 if (status) {
8082 status = mgmt_status(status);
8083 eir_len = 0;
8085 h192 = NULL;
8086 r192 = NULL;
8087 h256 = NULL;
8088 r256 = NULL;
8089 } else if (!bredr_sc_enabled(hdev)) {
8090 struct hci_rp_read_local_oob_data *rp;
8092 if (skb->len != sizeof(*rp)) {
8093 status = MGMT_STATUS_FAILED;
8094 eir_len = 0;
8095 } else {
8096 status = MGMT_STATUS_SUCCESS;
8097 rp = (void *)skb->data;
8099 eir_len = 5 + 18 + 18;
8100 h192 = rp->hash;
8101 r192 = rp->rand;
8102 h256 = NULL;
8103 r256 = NULL;
8104 }
8105 } else {
8106 struct hci_rp_read_local_oob_ext_data *rp;
8108 if (skb->len != sizeof(*rp)) {
8109 status = MGMT_STATUS_FAILED;
8110 eir_len = 0;
8111 } else {
8112 status = MGMT_STATUS_SUCCESS;
8113 rp = (void *)skb->data;
8115 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8116 eir_len = 5 + 18 + 18;
8117 h192 = NULL;
8118 r192 = NULL;
8119 } else {
8120 eir_len = 5 + 18 + 18 + 18 + 18;
8121 h192 = rp->hash192;
8122 r192 = rp->rand192;
8123 }
8125 h256 = rp->hash256;
8126 r256 = rp->rand256;
8127 }
8128 }
8130 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8131 if (!mgmt_rp)
8132 goto done;
8134 if (eir_len == 0)
8135 goto send_rsp;
8137 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8138 hdev->dev_class, 3);
8140 if (h192 && r192) {
8141 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8142 EIR_SSP_HASH_C192, h192, 16);
8143 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8144 EIR_SSP_RAND_R192, r192, 16);
8147 if (h256 && r256) {
8148 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8149 EIR_SSP_HASH_C256, h256, 16);
8150 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8151 EIR_SSP_RAND_R256, r256, 16);
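/* Each hash/randomizer pair takes 18 bytes of EIR space (1 length
* octet + 1 type octet + 16 data octets) and the class of device 5
* bytes, which is where the eir_len values of 5 + 18 + 18 (+ 18 +
* 18) above come from.
*/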
8154 send_rsp:
8155 mgmt_rp->type = mgmt_cp->type;
8156 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8158 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8159 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8160 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8161 if (err < 0 || status)
8162 goto done;
8164 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8166 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8167 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8168 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8169 done:
8170 if (skb && !IS_ERR(skb))
8171 kfree_skb(skb);
8173 kfree(mgmt_rp);
8174 mgmt_pending_remove(cmd);
8177 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8178 struct mgmt_cp_read_local_oob_ext_data *cp)
8180 struct mgmt_pending_cmd *cmd;
8181 int err;
8183 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8184 cp, sizeof(*cp));
8185 if (!cmd)
8186 return -ENOMEM;
8188 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8189 read_local_oob_ext_data_complete);
8191 if (err < 0) {
8192 mgmt_pending_remove(cmd);
8193 return err;
8196 return 0;
8199 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8200 void *data, u16 data_len)
8202 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8203 struct mgmt_rp_read_local_oob_ext_data *rp;
8204 size_t rp_len;
8205 u16 eir_len;
8206 u8 status, flags, role, addr[7], hash[16], rand[16];
8207 int err;
8209 bt_dev_dbg(hdev, "sock %p", sk);
8211 if (hdev_is_powered(hdev)) {
8212 switch (cp->type) {
8213 case BIT(BDADDR_BREDR):
8214 status = mgmt_bredr_support(hdev);
8215 if (status)
8216 eir_len = 0;
8217 else
8218 eir_len = 5;
8219 break;
8220 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8221 status = mgmt_le_support(hdev);
8222 if (status)
8223 eir_len = 0;
8224 else
8225 eir_len = 9 + 3 + 18 + 18 + 3;
8226 break;
8227 default:
8228 status = MGMT_STATUS_INVALID_PARAMS;
8229 eir_len = 0;
8230 break;
8232 } else {
8233 status = MGMT_STATUS_NOT_POWERED;
8234 eir_len = 0;
8237 rp_len = sizeof(*rp) + eir_len;
8238 rp = kmalloc(rp_len, GFP_ATOMIC);
8239 if (!rp)
8240 return -ENOMEM;
8242 if (!status && !lmp_ssp_capable(hdev)) {
8243 status = MGMT_STATUS_NOT_SUPPORTED;
8244 eir_len = 0;
8247 if (status)
8248 goto complete;
8250 hci_dev_lock(hdev);
8252 eir_len = 0;
8253 switch (cp->type) {
8254 case BIT(BDADDR_BREDR):
8255 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8256 err = read_local_ssp_oob_req(hdev, sk, cp);
8257 hci_dev_unlock(hdev);
8258 if (!err)
8259 goto done;
8261 status = MGMT_STATUS_FAILED;
8262 goto complete;
8263 } else {
8264 eir_len = eir_append_data(rp->eir, eir_len,
8265 EIR_CLASS_OF_DEV,
8266 hdev->dev_class, 3);
8268 break;
8269 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8270 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8271 smp_generate_oob(hdev, hash, rand) < 0) {
8272 hci_dev_unlock(hdev);
8273 status = MGMT_STATUS_FAILED;
8274 goto complete;
8277 /* This should return the active RPA, but since the RPA
8278 * is only programmed on demand, it is really hard to fill
8279 * this in at the moment. For now disallow retrieving
8280 * local out-of-band data when privacy is in use.
8281 *
8282 * Returning the identity address will not help here since
8283 * pairing happens before the identity resolving key is
8284 * known and thus the connection establishment happens
8285 * based on the RPA and not the identity address.
8286 */
8287 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8288 hci_dev_unlock(hdev);
8289 status = MGMT_STATUS_REJECTED;
8290 goto complete;
8293 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8294 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8295 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8296 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8297 memcpy(addr, &hdev->static_addr, 6);
8298 addr[6] = 0x01;
8299 } else {
8300 memcpy(addr, &hdev->bdaddr, 6);
8301 addr[6] = 0x00;
8304 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8305 addr, sizeof(addr));
8307 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8308 role = 0x02;
8309 else
8310 role = 0x01;
8312 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8313 &role, sizeof(role));
8315 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8316 eir_len = eir_append_data(rp->eir, eir_len,
8317 EIR_LE_SC_CONFIRM,
8318 hash, sizeof(hash));
8320 eir_len = eir_append_data(rp->eir, eir_len,
8321 EIR_LE_SC_RANDOM,
8322 rand, sizeof(rand));
8325 flags = mgmt_get_adv_discov_flags(hdev);
8327 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8328 flags |= LE_AD_NO_BREDR;
8330 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8331 &flags, sizeof(flags));
8332 break;
8335 hci_dev_unlock(hdev);
8337 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8339 status = MGMT_STATUS_SUCCESS;
8341 complete:
8342 rp->type = cp->type;
8343 rp->eir_len = cpu_to_le16(eir_len);
8345 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8346 status, rp, sizeof(*rp) + eir_len);
8347 if (err < 0 || status)
8348 goto done;
8350 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8351 rp, sizeof(*rp) + eir_len,
8352 HCI_MGMT_OOB_DATA_EVENTS, sk);
8354 done:
8355 kfree(rp);
8357 return err;
8360 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8362 u32 flags = 0;
8364 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8365 flags |= MGMT_ADV_FLAG_DISCOV;
8366 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8367 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8368 flags |= MGMT_ADV_FLAG_APPEARANCE;
8369 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8370 flags |= MGMT_ADV_PARAM_DURATION;
8371 flags |= MGMT_ADV_PARAM_TIMEOUT;
8372 flags |= MGMT_ADV_PARAM_INTERVALS;
8373 flags |= MGMT_ADV_PARAM_TX_POWER;
8374 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8376 /* In extended adv the TX_POWER returned from Set Adv Param
8377 * will always be valid.
8378 */
8379 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8380 flags |= MGMT_ADV_FLAG_TX_POWER;
8382 if (ext_adv_capable(hdev)) {
8383 flags |= MGMT_ADV_FLAG_SEC_1M;
8384 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8385 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8387 if (le_2m_capable(hdev))
8388 flags |= MGMT_ADV_FLAG_SEC_2M;
8390 if (le_coded_capable(hdev))
8391 flags |= MGMT_ADV_FLAG_SEC_CODED;
8394 return flags;
8397 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8398 void *data, u16 data_len)
8400 struct mgmt_rp_read_adv_features *rp;
8401 size_t rp_len;
8402 int err;
8403 struct adv_info *adv_instance;
8404 u32 supported_flags;
8405 u8 *instance;
8407 bt_dev_dbg(hdev, "sock %p", sk);
8409 if (!lmp_le_capable(hdev))
8410 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8411 MGMT_STATUS_REJECTED);
8413 hci_dev_lock(hdev);
8415 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8416 rp = kmalloc(rp_len, GFP_ATOMIC);
8417 if (!rp) {
8418 hci_dev_unlock(hdev);
8419 return -ENOMEM;
8422 supported_flags = get_supported_adv_flags(hdev);
8424 rp->supported_flags = cpu_to_le32(supported_flags);
8425 rp->max_adv_data_len = max_adv_len(hdev);
8426 rp->max_scan_rsp_len = max_adv_len(hdev);
8427 rp->max_instances = hdev->le_num_of_adv_sets;
8428 rp->num_instances = hdev->adv_instance_cnt;
8430 instance = rp->instance;
8431 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8432 /* Only instances 1-le_num_of_adv_sets are externally visible */
8433 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8434 *instance = adv_instance->instance;
8435 instance++;
8436 } else {
8437 rp->num_instances--;
8438 rp_len--;
8442 hci_dev_unlock(hdev);
8444 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8445 MGMT_STATUS_SUCCESS, rp, rp_len);
8447 kfree(rp);
8449 return err;
8452 static u8 calculate_name_len(struct hci_dev *hdev)
8454 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8456 return eir_append_local_name(hdev, buf, 0);
8459 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8460 bool is_adv_data)
8462 u8 max_len = max_adv_len(hdev);
8464 if (is_adv_data) {
8465 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8466 MGMT_ADV_FLAG_LIMITED_DISCOV |
8467 MGMT_ADV_FLAG_MANAGED_FLAGS))
8468 max_len -= 3;
8470 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8471 max_len -= 3;
8472 } else {
8473 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8474 max_len -= calculate_name_len(hdev);
8476 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8477 max_len -= 4;
8480 return max_len;
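/* The reservations above cover the managed fields: 3 bytes each for
* flags and TX power (length + type + 1 octet), 4 bytes for the
* appearance (length + type + 2 octets) and up to
* HCI_MAX_SHORT_NAME_LENGTH + 2 bytes for the local name.
*/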
8483 static bool flags_managed(u32 adv_flags)
8485 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8486 MGMT_ADV_FLAG_LIMITED_DISCOV |
8487 MGMT_ADV_FLAG_MANAGED_FLAGS);
8490 static bool tx_power_managed(u32 adv_flags)
8492 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8495 static bool name_managed(u32 adv_flags)
8497 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8500 static bool appearance_managed(u32 adv_flags)
8502 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8505 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8506 u8 len, bool is_adv_data)
8508 int i, cur_len;
8509 u8 max_len;
8511 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8513 if (len > max_len)
8514 return false;
8516 /* Make sure that the data is correctly formatted. */
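/* Advertising data is a sequence of length-prefixed structures:
* data[i] holds the length of the field excluding the length octet
* itself, so e.g. 02 01 06 is a field of length 2, type EIR_FLAGS
* (0x01) and value 0x06.
*/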
8517 for (i = 0; i < len; i += (cur_len + 1)) {
8518 cur_len = data[i];
8520 if (!cur_len)
8521 continue;
8523 if (data[i + 1] == EIR_FLAGS &&
8524 (!is_adv_data || flags_managed(adv_flags)))
8525 return false;
8527 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8528 return false;
8530 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8531 return false;
8533 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8534 return false;
8536 if (data[i + 1] == EIR_APPEARANCE &&
8537 appearance_managed(adv_flags))
8538 return false;
8540 /* If the current field length would exceed the total data
8541 * length, then it's invalid.
8542 */
8543 if (i + cur_len >= len)
8544 return false;
8547 return true;
8550 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8552 u32 supported_flags, phy_flags;
8554 /* The current implementation only supports a subset of the specified
8555 * flags. We also need to check mutual exclusiveness of the sec flags.
8556 */
8557 supported_flags = get_supported_adv_flags(hdev);
8558 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8559 if (adv_flags & ~supported_flags ||
8560 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8561 return false;
8563 return true;
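/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR in
* the check above is non-zero exactly when more than one
* MGMT_ADV_FLAG_SEC_* bit was requested, enforcing that the
* secondary PHY flags are mutually exclusive.
*/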
8566 static bool adv_busy(struct hci_dev *hdev)
8568 return pending_find(MGMT_OP_SET_LE, hdev);
8571 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8572 int err)
8574 struct adv_info *adv, *n;
8576 bt_dev_dbg(hdev, "err %d", err);
8578 hci_dev_lock(hdev);
8580 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8581 u8 instance;
8583 if (!adv->pending)
8584 continue;
8586 if (!err) {
8587 adv->pending = false;
8588 continue;
8591 instance = adv->instance;
8593 if (hdev->cur_adv_instance == instance)
8594 cancel_adv_timeout(hdev);
8596 hci_remove_adv_instance(hdev, instance);
8597 mgmt_advertising_removed(sk, hdev, instance);
8600 hci_dev_unlock(hdev);
static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

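/* Handler for MGMT_OP_ADD_ADVERTISING: validates the request, registers or
 * updates the instance and, unless the controller state makes it pointless,
 * queues the work that actually schedules the instance.
 */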
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

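/* Completion for Add Extended Advertising Parameters: report the selected
 * TX power and the remaining data space back to userspace, or tear the
 * instance down again if programming the controller failed.
 */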
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

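/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: the first half of the split
 * interface, creating the instance from parameters only. The advertising
 * and scan response data follow in a separate Add Extended Advertising
 * Data call.
 */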
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

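/* With extended advertising the data, scan response and enable are issued
 * as separate HCI commands; legacy controllers fall back to the software
 * instance scheduler instead.
 */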
static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

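/* Removal path: an instance value of 0 in the request means "remove all
 * instances", and advertising is disabled altogether once the last
 * instance is gone.
 */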
static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

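/* Command dispatch table. Entries are indexed by MGMT opcode (see
 * hci_mgmt_cmd() in hci_sock.c), so their order must match the MGMT_OP_*
 * numbering exactly.
 */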
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};

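/* Index events: configured and unconfigured controllers are announced via
 * separate legacy events, while the extended event additionally carries
 * the controller type and bus.
 */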
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let us
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

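/* Map an SMP long term key to the MGMT key type expected by userspace,
 * taking the authentication level into account.
 */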
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

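/* A static random address has its two most significant bits set to 1, so
 * the (b[5] & 0xc0) != 0xc0 checks below single out private (resolvable or
 * non-resolvable) random addresses.
 */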
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * do we make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * do we make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate buffer for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

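/* A controller counts as powering down if HCI_POWERING_DOWN is already set
 * or if a pending Set Powered command is requesting power off.
 */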
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

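/* Common completion for the four user confirm/passkey reply variants; the
 * opcode selects which pending command gets resolved.
 */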
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

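/* Service discovery UUID filtering: 16-bit and 32-bit UUIDs from the EIR
 * data are expanded onto the Bluetooth base UUID before being compared
 * with the 128-bit UUIDs supplied in the filter.
 */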
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

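/* Mesh reports are only forwarded if the advertisement or its scan
 * response carries at least one AD type the receiver registered interest
 * in; an empty filter list accepts everything.
 */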
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

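/* Called when a management socket closes: abort any mesh transmissions
 * still owned by that socket on every registered controller.
 */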
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}