video: ARM CLCD: Fix color model capabilities for DT platforms
[linux-2.6/btrfs-unstable.git] / net / bluetooth / mgmt.c
blobb8554d429d889f97bd735cc62aaafce1e85bae5c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "smp.h"
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
/* Management opcodes implemented by this kernel; reported verbatim via
 * MGMT_OP_READ_COMMANDS so user space can probe for supported features.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};

/* Management events this kernel can emit; also reported via
 * MGMT_OP_READ_COMMANDS alongside the opcode list above.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
/* How long the service cache (EIR/class updates held back during UUID
 * changes) stays active before being flushed.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* "Powered" from the mgmt point of view: the transport is up and the
 * controller is not merely auto-powered for initial setup (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* A management command that has been accepted but not yet completed;
 * queued on hdev->mgmt_pending until the matching HCI activity finishes.
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* being processed */
	int index;		/* controller index the command targets */
	void *param;		/* private copy of the command parameters */
	struct sock *sk;	/* socket that issued the command (held) */
	void *user_data;	/* per-command context pointer */
};
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * byte (Bluetooth Core Spec error codes, 0x00 upwards).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
220 struct sk_buff *skb;
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 if (!skb)
225 return -ENOMEM;
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
229 if (hdev)
230 hdr->index = cpu_to_le16(hdev->id);
231 else
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
235 if (data)
236 memcpy(skb_put(skb, data_len), data, data_len);
238 /* Time stamp */
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
242 kfree_skb(skb);
244 return 0;
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
249 struct sk_buff *skb;
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
252 int err;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257 if (!skb)
258 return -ENOMEM;
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
267 ev->status = status;
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
271 if (err < 0)
272 kfree_skb(skb);
274 return err;
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
280 struct sk_buff *skb;
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
283 int err;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
288 if (!skb)
289 return -ENOMEM;
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
299 ev->status = status;
301 if (rp)
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
305 if (err < 0)
306 kfree_skb(skb);
308 return err;
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326 u16 data_len)
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
331 __le16 *opcode;
332 size_t rp_size;
333 int i, err;
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
340 if (!rp)
341 return -ENOMEM;
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353 rp_size);
354 kfree(rp);
356 return err;
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
360 u16 data_len)
362 struct mgmt_rp_read_index_list *rp;
363 struct hci_dev *d;
364 size_t rp_len;
365 u16 count;
366 int err;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
372 count = 0;
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
376 count++;
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
381 if (!rp) {
382 read_unlock(&hci_dev_list_lock);
383 return -ENOMEM;
386 count = 0;
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
391 continue;
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
397 continue;
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
412 rp_len);
414 kfree(rp);
416 return err;
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
423 struct hci_dev *d;
424 size_t rp_len;
425 u16 count;
426 int err;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
432 count = 0;
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 count++;
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
441 if (!rp) {
442 read_unlock(&hci_dev_list_lock);
443 return -ENOMEM;
446 count = 0;
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
451 continue;
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 continue;
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
472 0, rp, rp_len);
474 kfree(rp);
476 return err;
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
489 return true;
492 static __le32 get_missing_options(struct hci_dev *hdev)
494 u32 options = 0;
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
527 u32 options = 0;
529 BT_DBG("sock %p %s", sk, hdev->name);
531 hci_dev_lock(hdev);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 sizeof(rp));
551 static u32 get_supported_settings(struct hci_dev *hdev)
553 u32 settings = 0;
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_BONDABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_PRIVACY;
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
584 hdev->set_bdaddr)
585 settings |= MGMT_SETTING_CONFIGURATION;
587 return settings;
590 static u32 get_current_settings(struct hci_dev *hdev)
592 u32 settings = 0;
594 if (hdev_is_powered(hdev))
595 settings |= MGMT_SETTING_POWERED;
597 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_CONNECTABLE;
600 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_DISCOVERABLE;
606 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_BONDABLE;
609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_BREDR;
612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LE;
615 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616 settings |= MGMT_SETTING_LINK_SECURITY;
618 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_SSP;
621 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622 settings |= MGMT_SETTING_HS;
624 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625 settings |= MGMT_SETTING_ADVERTISING;
627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_SECURE_CONN;
630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631 settings |= MGMT_SETTING_DEBUG_KEYS;
633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634 settings |= MGMT_SETTING_PRIVACY;
636 return settings;
639 #define PNP_INFO_SVCLASS_ID 0x1200
641 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
646 if (len < 4)
647 return ptr;
649 list_for_each_entry(uuid, &hdev->uuids, list) {
650 u16 uuid16;
652 if (uuid->size != 16)
653 continue;
655 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
656 if (uuid16 < 0x1100)
657 continue;
659 if (uuid16 == PNP_INFO_SVCLASS_ID)
660 continue;
662 if (!uuids_start) {
663 uuids_start = ptr;
664 uuids_start[0] = 1;
665 uuids_start[1] = EIR_UUID16_ALL;
666 ptr += 2;
669 /* Stop if not enough space to put next UUID */
670 if ((ptr - data) + sizeof(u16) > len) {
671 uuids_start[1] = EIR_UUID16_SOME;
672 break;
675 *ptr++ = (uuid16 & 0x00ff);
676 *ptr++ = (uuid16 & 0xff00) >> 8;
677 uuids_start[0] += sizeof(uuid16);
680 return ptr;
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
688 if (len < 6)
689 return ptr;
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
693 continue;
695 if (!uuids_start) {
696 uuids_start = ptr;
697 uuids_start[0] = 1;
698 uuids_start[1] = EIR_UUID32_ALL;
699 ptr += 2;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
705 break;
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
709 ptr += sizeof(u32);
710 uuids_start[0] += sizeof(u32);
713 return ptr;
716 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
721 if (len < 18)
722 return ptr;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 128)
726 continue;
728 if (!uuids_start) {
729 uuids_start = ptr;
730 uuids_start[0] = 1;
731 uuids_start[1] = EIR_UUID128_ALL;
732 ptr += 2;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + 16 > len) {
737 uuids_start[1] = EIR_UUID128_SOME;
738 break;
741 memcpy(ptr, uuid->uuid, 16);
742 ptr += 16;
743 uuids_start[0] += 16;
746 return ptr;
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 struct pending_cmd *cmd;
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
755 return cmd;
758 return NULL;
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
765 struct pending_cmd *cmd;
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
774 return NULL;
777 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
779 u8 ad_len = 0;
780 size_t name_len;
782 name_len = strlen(hdev->dev_name);
783 if (name_len > 0) {
784 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786 if (name_len > max_len) {
787 name_len = max_len;
788 ptr[1] = EIR_NAME_SHORT;
789 } else
790 ptr[1] = EIR_NAME_COMPLETE;
792 ptr[0] = name_len + 1;
794 memcpy(ptr + 2, hdev->dev_name, name_len);
796 ad_len += (name_len + 2);
797 ptr += (name_len + 2);
800 return ad_len;
803 static void update_scan_rsp_data(struct hci_request *req)
805 struct hci_dev *hdev = req->hdev;
806 struct hci_cp_le_set_scan_rsp_data cp;
807 u8 len;
809 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
810 return;
812 memset(&cp, 0, sizeof(cp));
814 len = create_scan_rsp_data(hdev, cp.data);
816 if (hdev->scan_rsp_data_len == len &&
817 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
818 return;
820 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
821 hdev->scan_rsp_data_len = len;
823 cp.length = len;
825 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 struct pending_cmd *cmd;
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
836 if (cmd) {
837 struct mgmt_mode *cp = cmd->param;
838 if (cp->val == 0x01)
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
842 } else {
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
849 return 0;
852 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 u8 ad_len = 0, flags = 0;
856 flags |= get_adv_discov_flags(hdev);
858 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
859 flags |= LE_AD_NO_BREDR;
861 if (flags) {
862 BT_DBG("adv flags 0x%02x", flags);
864 ptr[0] = 2;
865 ptr[1] = EIR_FLAGS;
866 ptr[2] = flags;
868 ad_len += 3;
869 ptr += 3;
872 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
873 ptr[0] = 2;
874 ptr[1] = EIR_TX_POWER;
875 ptr[2] = (u8) hdev->adv_tx_power;
877 ad_len += 3;
878 ptr += 3;
881 return ad_len;
884 static void update_adv_data(struct hci_request *req)
886 struct hci_dev *hdev = req->hdev;
887 struct hci_cp_le_set_adv_data cp;
888 u8 len;
890 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
891 return;
893 memset(&cp, 0, sizeof(cp));
895 len = create_adv_data(hdev, cp.data);
897 if (hdev->adv_data_len == len &&
898 memcmp(cp.data, hdev->adv_data, len) == 0)
899 return;
901 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
902 hdev->adv_data_len = len;
904 cp.length = len;
906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
909 int mgmt_update_adv_data(struct hci_dev *hdev)
911 struct hci_request req;
913 hci_req_init(&req, hdev);
914 update_adv_data(&req);
916 return hci_req_run(&req, NULL);
919 static void create_eir(struct hci_dev *hdev, u8 *data)
921 u8 *ptr = data;
922 size_t name_len;
924 name_len = strlen(hdev->dev_name);
926 if (name_len > 0) {
927 /* EIR Data type */
928 if (name_len > 48) {
929 name_len = 48;
930 ptr[1] = EIR_NAME_SHORT;
931 } else
932 ptr[1] = EIR_NAME_COMPLETE;
934 /* EIR Data length */
935 ptr[0] = name_len + 1;
937 memcpy(ptr + 2, hdev->dev_name, name_len);
939 ptr += (name_len + 2);
942 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
943 ptr[0] = 2;
944 ptr[1] = EIR_TX_POWER;
945 ptr[2] = (u8) hdev->inq_tx_power;
947 ptr += 3;
950 if (hdev->devid_source > 0) {
951 ptr[0] = 9;
952 ptr[1] = EIR_DEVICE_ID;
954 put_unaligned_le16(hdev->devid_source, ptr + 2);
955 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
956 put_unaligned_le16(hdev->devid_product, ptr + 6);
957 put_unaligned_le16(hdev->devid_version, ptr + 8);
959 ptr += 10;
962 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
963 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
967 static void update_eir(struct hci_request *req)
969 struct hci_dev *hdev = req->hdev;
970 struct hci_cp_write_eir cp;
972 if (!hdev_is_powered(hdev))
973 return;
975 if (!lmp_ext_inq_capable(hdev))
976 return;
978 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
979 return;
981 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
982 return;
984 memset(&cp, 0, sizeof(cp));
986 create_eir(hdev, cp.data);
988 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
989 return;
991 memcpy(hdev->eir, cp.data, sizeof(cp.data));
993 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
996 static u8 get_service_classes(struct hci_dev *hdev)
998 struct bt_uuid *uuid;
999 u8 val = 0;
1001 list_for_each_entry(uuid, &hdev->uuids, list)
1002 val |= uuid->svc_hint;
1004 return val;
1007 static void update_class(struct hci_request *req)
1009 struct hci_dev *hdev = req->hdev;
1010 u8 cod[3];
1012 BT_DBG("%s", hdev->name);
1014 if (!hdev_is_powered(hdev))
1015 return;
1017 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1018 return;
1020 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1021 return;
1023 cod[0] = hdev->minor_class;
1024 cod[1] = hdev->major_class;
1025 cod[2] = get_service_classes(hdev);
1027 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1028 cod[1] |= 0x20;
1030 if (memcmp(cod, hdev->dev_class, 3) == 0)
1031 return;
1033 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1036 static bool get_connectable(struct hci_dev *hdev)
1038 struct pending_cmd *cmd;
1040 /* If there's a pending mgmt command the flag will not yet have
1041 * it's final value, so check for this first.
1043 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1044 if (cmd) {
1045 struct mgmt_mode *cp = cmd->param;
1046 return cp->val;
1049 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1052 static void disable_advertising(struct hci_request *req)
1054 u8 enable = 0x00;
1056 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1059 static void enable_advertising(struct hci_request *req)
1061 struct hci_dev *hdev = req->hdev;
1062 struct hci_cp_le_set_adv_param cp;
1063 u8 own_addr_type, enable = 0x01;
1064 bool connectable;
1066 if (hci_conn_num(hdev, LE_LINK) > 0)
1067 return;
1069 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1070 disable_advertising(req);
1072 /* Clear the HCI_LE_ADV bit temporarily so that the
1073 * hci_update_random_address knows that it's safe to go ahead
1074 * and write a new random address. The flag will be set back on
1075 * as soon as the SET_ADV_ENABLE HCI command completes.
1077 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1079 connectable = get_connectable(hdev);
1081 /* Set require_privacy to true only when non-connectable
1082 * advertising is used. In that case it is fine to use a
1083 * non-resolvable private address.
1085 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1086 return;
1088 memset(&cp, 0, sizeof(cp));
1089 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1090 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1091 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1092 cp.own_address_type = own_addr_type;
1093 cp.channel_map = hdev->le_adv_channel_map;
1095 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1097 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1100 static void service_cache_off(struct work_struct *work)
1102 struct hci_dev *hdev = container_of(work, struct hci_dev,
1103 service_cache.work);
1104 struct hci_request req;
1106 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1107 return;
1109 hci_req_init(&req, hdev);
1111 hci_dev_lock(hdev);
1113 update_eir(&req);
1114 update_class(&req);
1116 hci_dev_unlock(hdev);
1118 hci_req_run(&req, NULL);
1121 static void rpa_expired(struct work_struct *work)
1123 struct hci_dev *hdev = container_of(work, struct hci_dev,
1124 rpa_expired.work);
1125 struct hci_request req;
1127 BT_DBG("");
1129 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1131 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1132 return;
1134 /* The generation of a new RPA and programming it into the
1135 * controller happens in the enable_advertising() function.
1137 hci_req_init(&req, hdev);
1138 enable_advertising(&req);
1139 hci_req_run(&req, NULL);
1142 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1144 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1145 return;
1147 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1148 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1150 /* Non-mgmt controlled devices get this bit set
1151 * implicitly so that pairing works for them, however
1152 * for mgmt we require user-space to explicitly enable
1153 * it
1155 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1158 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1159 void *data, u16 data_len)
1161 struct mgmt_rp_read_info rp;
1163 BT_DBG("sock %p %s", sk, hdev->name);
1165 hci_dev_lock(hdev);
1167 memset(&rp, 0, sizeof(rp));
1169 bacpy(&rp.bdaddr, &hdev->bdaddr);
1171 rp.version = hdev->hci_ver;
1172 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1174 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1175 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1177 memcpy(rp.dev_class, hdev->dev_class, 3);
1179 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1180 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1182 hci_dev_unlock(hdev);
1184 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1185 sizeof(rp));
1188 static void mgmt_pending_free(struct pending_cmd *cmd)
1190 sock_put(cmd->sk);
1191 kfree(cmd->param);
1192 kfree(cmd);
1195 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1196 struct hci_dev *hdev, void *data,
1197 u16 len)
1199 struct pending_cmd *cmd;
1201 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1202 if (!cmd)
1203 return NULL;
1205 cmd->opcode = opcode;
1206 cmd->index = hdev->id;
1208 cmd->param = kmalloc(len, GFP_KERNEL);
1209 if (!cmd->param) {
1210 kfree(cmd);
1211 return NULL;
1214 if (data)
1215 memcpy(cmd->param, data, len);
1217 cmd->sk = sk;
1218 sock_hold(sk);
1220 list_add(&cmd->list, &hdev->mgmt_pending);
1222 return cmd;
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1227 void *data),
1228 void *data)
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1234 continue;
1236 cb(cmd, data);
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251 sizeof(settings));
/* Completion callback for the power-off cleanup request. If no
 * connections remain, power off right away instead of waiting for the
 * delayed power_off work to expire.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
/* Queue the HCI commands needed to abort an ongoing discovery,
 * depending on its current phase. Returns true if any stop action was
 * queued (so the caller knows to move to DISCOVERY_STOPPING), false if
 * nothing needed to be done.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is active */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* A remote name request may be in flight; cancel it */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
/* Queue all HCI commands needed to quiesce the controller before power
 * off: disable page/inquiry scan, stop advertising, abort discovery,
 * and disconnect, cancel or reject every known connection. Returns the
 * hci_req_run() result (-ENODATA means nothing had to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: plain disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempt: cancel per link type */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming attempt: reject per link type */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
/* Handle the Set Powered mgmt command. Powering on is delegated to the
 * power_on work; powering off first quiesces the controller via
 * clean_up_hci_state() and then arms the delayed power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device was only auto-powered (e.g. during setup): cancel the
	 * pending auto-off and, when powering on, just report the
	 * device as powered via mgmt_powered().
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* NOTE(review): mgmt_pending_add() return value
			 * is not checked here; on allocation failure the
			 * command would get no response — confirm intent.
			 */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: respond immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1433 __le32 ev;
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Broadcast a New Settings event to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
/* Iterator context for settings_rsp()/cmd_status_rsp(): records the
 * first responded-to socket (with a held reference) so the caller can
 * skip it when broadcasting New Settings afterwards.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1469 u8 *status = data;
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
1475 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1477 if (!lmp_bredr_capable(hdev))
1478 return MGMT_STATUS_NOT_SUPPORTED;
1479 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1480 return MGMT_STATUS_REJECTED;
1481 else
1482 return MGMT_STATUS_SUCCESS;
1485 static u8 mgmt_le_support(struct hci_dev *hdev)
1487 if (!lmp_le_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1490 return MGMT_STATUS_REJECTED;
1491 else
1492 return MGMT_STATUS_SUCCESS;
/* Completion handler for Set Discoverable: update HCI_DISCOVERABLE,
 * arm the discoverable timeout if one was requested, respond to the
 * pending command and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited flag was set optimistically before the
		 * request ran; undo it on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Set Discoverable mgmt command. Supports off (0x00),
 * general (0x01) and limited (0x02) discoverable modes with an optional
 * timeout; queues IAC/scan-enable HCI commands for BR/EDR controllers
 * and advertising-data updates for LE-only ones.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Queue page-scan parameter updates toggling "fast connectable"
 * (interlaced scan, 160 ms interval) versus the standard defaults.
 * Commands are only queued when the values actually change.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity commands require Bluetooth 1.2+ */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
/* Completion handler for Set Connectable: sync HCI_CONNECTABLE (and,
 * when disabling, HCI_DISCOVERABLE), respond to the pending command and
 * refresh advertising data / background scanning as needed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		/* Turning connectable off also turns discoverable off */
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1800 static int set_connectable_update_settings(struct hci_dev *hdev,
1801 struct sock *sk, u8 val)
1803 bool changed = false;
1804 int err;
1806 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1807 changed = true;
1809 if (val) {
1810 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1811 } else {
1812 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1816 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1817 if (err < 0)
1818 return err;
1820 if (changed) {
1821 hci_update_background_scan(hdev);
1822 return new_settings(hdev, sk);
1825 return 0;
/* Handle the Set Connectable mgmt command. For powered controllers
 * this queues scan-enable / page-scan / advertising HCI commands; when
 * powered off only the setting flags are updated.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* No commands were queued: fall back to flag-only update */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1933 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1934 u16 len)
1936 struct mgmt_mode *cp = data;
1937 bool changed;
1938 int err;
1940 BT_DBG("request for %s", hdev->name);
1942 if (cp->val != 0x00 && cp->val != 0x01)
1943 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1944 MGMT_STATUS_INVALID_PARAMS);
1946 hci_dev_lock(hdev);
1948 if (cp->val)
1949 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1950 else
1951 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1953 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1954 if (err < 0)
1955 goto unlock;
1957 if (changed)
1958 err = new_settings(hdev, sk);
1960 unlock:
1961 hci_dev_unlock(hdev);
1962 return err;
/* Handle the Set Link Security mgmt command: when powered, program the
 * controller via HCI Write Authentication Enable; when powered off only
 * toggle the HCI_LINK_SECURITY flag.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Set Secure Simple Pairing mgmt command. Disabling SSP
 * also drops High Speed (HS depends on SSP) and turns off SSP debug
 * mode if it was enabled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables HS; "changed" is
			 * true if either flag was previously set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* When disabling SSP with debug keys in use, also turn debug
	 * mode off (cp->val is 0x00 here, matching "disable").
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Set High Speed mgmt command: a flag-only toggle that
 * requires SSP to be enabled. Disabling HS on a powered controller is
 * rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion handler for Set LE: answer all pending SET_LE commands,
 * broadcast New Settings, and (when LE is now on) refresh advertising
 * data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first responder */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
/* Handle the Set Low Energy mgmt command: program LE host support via
 * HCI Write LE Host Supported, disabling advertising first when LE is
 * being turned off. Flag-only path when powered off or already in the
 * requested host state.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Flag-only path: powered off, or host LE support already
	 * matches the requested value.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Turning LE off: stop advertising first */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are shorthand for
 * values within this range (distinguished by bytes 12-15).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2325 static u8 get_uuid_size(const u8 *uuid)
2327 u32 val;
2329 if (memcmp(uuid, bluetooth_base_uuid, 12))
2330 return 128;
2332 val = get_unaligned_le32(&uuid[12]);
2333 if (val > 0xffff)
2334 return 32;
2336 return 16;
/* Complete a pending class/EIR-affecting command (Add/Remove UUID,
 * Set Device Class) with the current 3-byte class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* HCI request callback for Add UUID: finish the pending command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
/* Handle the Add UUID mgmt command: record the UUID, then refresh
 * class of device and EIR data. If no HCI commands were needed
 * (-ENODATA) the command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2423 static bool enable_service_cache(struct hci_dev *hdev)
2425 if (!hdev_is_powered(hdev))
2426 return false;
2428 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2429 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2430 CACHE_TIMEOUT);
2431 return true;
2434 return false;
/* HCI request callback for Remove UUID: finish the pending command. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2444 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2445 u16 len)
2447 struct mgmt_cp_remove_uuid *cp = data;
2448 struct pending_cmd *cmd;
2449 struct bt_uuid *match, *tmp;
2450 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2451 struct hci_request req;
2452 int err, found;
2454 BT_DBG("request for %s", hdev->name);
2456 hci_dev_lock(hdev);
2458 if (pending_eir_or_class(hdev)) {
2459 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2460 MGMT_STATUS_BUSY);
2461 goto unlock;
2464 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2465 hci_uuids_clear(hdev);
2467 if (enable_service_cache(hdev)) {
2468 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2469 0, hdev->dev_class, 3);
2470 goto unlock;
2473 goto update_class;
2476 found = 0;
2478 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2479 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2480 continue;
2482 list_del(&match->list);
2483 kfree(match);
2484 found++;
2487 if (found == 0) {
2488 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2489 MGMT_STATUS_INVALID_PARAMS);
2490 goto unlock;
2493 update_class:
2494 hci_req_init(&req, hdev);
2496 update_class(&req);
2497 update_eir(&req);
2499 err = hci_req_run(&req, remove_uuid_complete);
2500 if (err < 0) {
2501 if (err != -ENODATA)
2502 goto unlock;
2504 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2505 hdev->dev_class, 3);
2506 goto unlock;
2509 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2510 if (!cmd) {
2511 err = -ENOMEM;
2512 goto unlock;
2515 err = 0;
2517 unlock:
2518 hci_dev_unlock(hdev);
2519 return err;
/* HCI request callback for Set Device Class: finish the pending
 * command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
/* Handle the Set Device Class mgmt command: validate the reserved bits
 * of major/minor, store them, and refresh the controller's class of
 * device (plus EIR if the service cache had to be flushed).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and top three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache synchronously; the lock must be
	 * dropped because the cache work itself takes hdev->lock.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing to send: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2600 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2601 u16 len)
2603 struct mgmt_cp_load_link_keys *cp = data;
2604 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2605 sizeof(struct mgmt_link_key_info));
2606 u16 key_count, expected_len;
2607 bool changed;
2608 int i;
2610 BT_DBG("request for %s", hdev->name);
2612 if (!lmp_bredr_capable(hdev))
2613 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2614 MGMT_STATUS_NOT_SUPPORTED);
2616 key_count = __le16_to_cpu(cp->key_count);
2617 if (key_count > max_key_count) {
2618 BT_ERR("load_link_keys: too big key_count value %u",
2619 key_count);
2620 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2621 MGMT_STATUS_INVALID_PARAMS);
2624 expected_len = sizeof(*cp) + key_count *
2625 sizeof(struct mgmt_link_key_info);
2626 if (expected_len != len) {
2627 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2628 expected_len, len);
2629 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2630 MGMT_STATUS_INVALID_PARAMS);
2633 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2634 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2635 MGMT_STATUS_INVALID_PARAMS);
2637 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2638 key_count);
2640 for (i = 0; i < key_count; i++) {
2641 struct mgmt_link_key_info *key = &cp->keys[i];
2643 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2644 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2645 MGMT_STATUS_INVALID_PARAMS);
2648 hci_dev_lock(hdev);
2650 hci_link_keys_clear(hdev);
2652 if (cp->debug_keys)
2653 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2654 &hdev->dev_flags);
2655 else
2656 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2657 &hdev->dev_flags);
2659 if (changed)
2660 new_settings(hdev, NULL);
2662 for (i = 0; i < key_count; i++) {
2663 struct mgmt_link_key_info *key = &cp->keys[i];
2665 /* Always ignore debug keys and require a new pairing if
2666 * the user wants to use them.
2668 if (key->type == HCI_LK_DEBUG_COMBINATION)
2669 continue;
2671 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2672 key->type, key->pin_len, NULL);
2675 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2677 hci_dev_unlock(hdev);
2679 return 0;
2682 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2683 u8 addr_type, struct sock *skip_sk)
2685 struct mgmt_ev_device_unpaired ev;
2687 bacpy(&ev.addr.bdaddr, bdaddr);
2688 ev.addr.type = addr_type;
2690 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2691 skip_sk);
2694 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2695 u16 len)
2697 struct mgmt_cp_unpair_device *cp = data;
2698 struct mgmt_rp_unpair_device rp;
2699 struct hci_cp_disconnect dc;
2700 struct pending_cmd *cmd;
2701 struct hci_conn *conn;
2702 int err;
2704 memset(&rp, 0, sizeof(rp));
2705 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2706 rp.addr.type = cp->addr.type;
2708 if (!bdaddr_type_is_valid(cp->addr.type))
2709 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710 MGMT_STATUS_INVALID_PARAMS,
2711 &rp, sizeof(rp));
2713 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2714 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2715 MGMT_STATUS_INVALID_PARAMS,
2716 &rp, sizeof(rp));
2718 hci_dev_lock(hdev);
2720 if (!hdev_is_powered(hdev)) {
2721 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2722 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2723 goto unlock;
2726 if (cp->addr.type == BDADDR_BREDR) {
2727 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2728 } else {
2729 u8 addr_type;
2731 if (cp->addr.type == BDADDR_LE_PUBLIC)
2732 addr_type = ADDR_LE_DEV_PUBLIC;
2733 else
2734 addr_type = ADDR_LE_DEV_RANDOM;
2736 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2738 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2740 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2743 if (err < 0) {
2744 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2745 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2746 goto unlock;
2749 if (cp->disconnect) {
2750 if (cp->addr.type == BDADDR_BREDR)
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2752 &cp->addr.bdaddr);
2753 else
2754 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2755 &cp->addr.bdaddr);
2756 } else {
2757 conn = NULL;
2760 if (!conn) {
2761 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2762 &rp, sizeof(rp));
2763 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2764 goto unlock;
2767 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2768 sizeof(*cp));
2769 if (!cmd) {
2770 err = -ENOMEM;
2771 goto unlock;
2774 dc.handle = cpu_to_le16(conn->handle);
2775 dc.reason = 0x13; /* Remote User Terminated Connection */
2776 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2777 if (err < 0)
2778 mgmt_pending_remove(cmd);
2780 unlock:
2781 hci_dev_unlock(hdev);
2782 return err;
2785 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2786 u16 len)
2788 struct mgmt_cp_disconnect *cp = data;
2789 struct mgmt_rp_disconnect rp;
2790 struct hci_cp_disconnect dc;
2791 struct pending_cmd *cmd;
2792 struct hci_conn *conn;
2793 int err;
2795 BT_DBG("");
2797 memset(&rp, 0, sizeof(rp));
2798 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2799 rp.addr.type = cp->addr.type;
2801 if (!bdaddr_type_is_valid(cp->addr.type))
2802 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2803 MGMT_STATUS_INVALID_PARAMS,
2804 &rp, sizeof(rp));
2806 hci_dev_lock(hdev);
2808 if (!test_bit(HCI_UP, &hdev->flags)) {
2809 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2810 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2811 goto failed;
2814 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2815 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2816 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2817 goto failed;
2820 if (cp->addr.type == BDADDR_BREDR)
2821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2822 &cp->addr.bdaddr);
2823 else
2824 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2826 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2827 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2828 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2829 goto failed;
2832 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2833 if (!cmd) {
2834 err = -ENOMEM;
2835 goto failed;
2838 dc.handle = cpu_to_le16(conn->handle);
2839 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2841 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2842 if (err < 0)
2843 mgmt_pending_remove(cmd);
2845 failed:
2846 hci_dev_unlock(hdev);
2847 return err;
2850 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2852 switch (link_type) {
2853 case LE_LINK:
2854 switch (addr_type) {
2855 case ADDR_LE_DEV_PUBLIC:
2856 return BDADDR_LE_PUBLIC;
2858 default:
2859 /* Fallback to LE Random address type */
2860 return BDADDR_LE_RANDOM;
2863 default:
2864 /* Fallback to BR/EDR type */
2865 return BDADDR_BREDR;
2869 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2870 u16 data_len)
2872 struct mgmt_rp_get_connections *rp;
2873 struct hci_conn *c;
2874 size_t rp_len;
2875 int err;
2876 u16 i;
2878 BT_DBG("");
2880 hci_dev_lock(hdev);
2882 if (!hdev_is_powered(hdev)) {
2883 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2884 MGMT_STATUS_NOT_POWERED);
2885 goto unlock;
2888 i = 0;
2889 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2890 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2891 i++;
2894 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2895 rp = kmalloc(rp_len, GFP_KERNEL);
2896 if (!rp) {
2897 err = -ENOMEM;
2898 goto unlock;
2901 i = 0;
2902 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2903 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2904 continue;
2905 bacpy(&rp->addr[i].bdaddr, &c->dst);
2906 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2907 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2908 continue;
2909 i++;
2912 rp->conn_count = cpu_to_le16(i);
2914 /* Recalculate length in case of filtered SCO connections, etc */
2915 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2917 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2918 rp_len);
2920 kfree(rp);
2922 unlock:
2923 hci_dev_unlock(hdev);
2924 return err;
2927 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2928 struct mgmt_cp_pin_code_neg_reply *cp)
2930 struct pending_cmd *cmd;
2931 int err;
2933 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2934 sizeof(*cp));
2935 if (!cmd)
2936 return -ENOMEM;
2938 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2939 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2940 if (err < 0)
2941 mgmt_pending_remove(cmd);
2943 return err;
2946 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2947 u16 len)
2949 struct hci_conn *conn;
2950 struct mgmt_cp_pin_code_reply *cp = data;
2951 struct hci_cp_pin_code_reply reply;
2952 struct pending_cmd *cmd;
2953 int err;
2955 BT_DBG("");
2957 hci_dev_lock(hdev);
2959 if (!hdev_is_powered(hdev)) {
2960 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2961 MGMT_STATUS_NOT_POWERED);
2962 goto failed;
2965 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2966 if (!conn) {
2967 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2968 MGMT_STATUS_NOT_CONNECTED);
2969 goto failed;
2972 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2973 struct mgmt_cp_pin_code_neg_reply ncp;
2975 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2977 BT_ERR("PIN code is not 16 bytes long");
2979 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2980 if (err >= 0)
2981 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2982 MGMT_STATUS_INVALID_PARAMS);
2984 goto failed;
2987 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2988 if (!cmd) {
2989 err = -ENOMEM;
2990 goto failed;
2993 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2994 reply.pin_len = cp->pin_len;
2995 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2997 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2998 if (err < 0)
2999 mgmt_pending_remove(cmd);
3001 failed:
3002 hci_dev_unlock(hdev);
3003 return err;
3006 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3007 u16 len)
3009 struct mgmt_cp_set_io_capability *cp = data;
3011 BT_DBG("");
3013 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3014 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3015 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3017 hci_dev_lock(hdev);
3019 hdev->io_capability = cp->io_capability;
3021 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3022 hdev->io_capability);
3024 hci_dev_unlock(hdev);
3026 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3030 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3032 struct hci_dev *hdev = conn->hdev;
3033 struct pending_cmd *cmd;
3035 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3036 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3037 continue;
3039 if (cmd->user_data != conn)
3040 continue;
3042 return cmd;
3045 return NULL;
3048 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3050 struct mgmt_rp_pair_device rp;
3051 struct hci_conn *conn = cmd->user_data;
3053 bacpy(&rp.addr.bdaddr, &conn->dst);
3054 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3056 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3057 &rp, sizeof(rp));
3059 /* So we don't get further callbacks for this connection */
3060 conn->connect_cfm_cb = NULL;
3061 conn->security_cfm_cb = NULL;
3062 conn->disconn_cfm_cb = NULL;
3064 hci_conn_drop(conn);
3066 mgmt_pending_remove(cmd);
3069 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3071 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3072 struct pending_cmd *cmd;
3074 cmd = find_pairing(conn);
3075 if (cmd)
3076 pairing_complete(cmd, status);
3079 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3081 struct pending_cmd *cmd;
3083 BT_DBG("status %u", status);
3085 cmd = find_pairing(conn);
3086 if (!cmd)
3087 BT_DBG("Unable to find a pending command");
3088 else
3089 pairing_complete(cmd, mgmt_status(status));
3092 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3094 struct pending_cmd *cmd;
3096 BT_DBG("status %u", status);
3098 if (!status)
3099 return;
3101 cmd = find_pairing(conn);
3102 if (!cmd)
3103 BT_DBG("Unable to find a pending command");
3104 else
3105 pairing_complete(cmd, mgmt_status(status));
3108 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3109 u16 len)
3111 struct mgmt_cp_pair_device *cp = data;
3112 struct mgmt_rp_pair_device rp;
3113 struct pending_cmd *cmd;
3114 u8 sec_level, auth_type;
3115 struct hci_conn *conn;
3116 int err;
3118 BT_DBG("");
3120 memset(&rp, 0, sizeof(rp));
3121 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3122 rp.addr.type = cp->addr.type;
3124 if (!bdaddr_type_is_valid(cp->addr.type))
3125 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3126 MGMT_STATUS_INVALID_PARAMS,
3127 &rp, sizeof(rp));
3129 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3130 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3131 MGMT_STATUS_INVALID_PARAMS,
3132 &rp, sizeof(rp));
3134 hci_dev_lock(hdev);
3136 if (!hdev_is_powered(hdev)) {
3137 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3138 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3139 goto unlock;
3142 sec_level = BT_SECURITY_MEDIUM;
3143 auth_type = HCI_AT_DEDICATED_BONDING;
3145 if (cp->addr.type == BDADDR_BREDR) {
3146 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3147 auth_type);
3148 } else {
3149 u8 addr_type;
3151 /* Convert from L2CAP channel address type to HCI address type
3153 if (cp->addr.type == BDADDR_LE_PUBLIC)
3154 addr_type = ADDR_LE_DEV_PUBLIC;
3155 else
3156 addr_type = ADDR_LE_DEV_RANDOM;
3158 /* When pairing a new device, it is expected to remember
3159 * this device for future connections. Adding the connection
3160 * parameter information ahead of time allows tracking
3161 * of the slave preferred values and will speed up any
3162 * further connection establishment.
3164 * If connection parameters already exist, then they
3165 * will be kept and this function does nothing.
3167 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3169 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3170 sec_level, HCI_LE_CONN_TIMEOUT,
3171 HCI_ROLE_MASTER);
3174 if (IS_ERR(conn)) {
3175 int status;
3177 if (PTR_ERR(conn) == -EBUSY)
3178 status = MGMT_STATUS_BUSY;
3179 else
3180 status = MGMT_STATUS_CONNECT_FAILED;
3182 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3183 status, &rp,
3184 sizeof(rp));
3185 goto unlock;
3188 if (conn->connect_cfm_cb) {
3189 hci_conn_drop(conn);
3190 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3191 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3192 goto unlock;
3195 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3196 if (!cmd) {
3197 err = -ENOMEM;
3198 hci_conn_drop(conn);
3199 goto unlock;
3202 /* For LE, just connecting isn't a proof that the pairing finished */
3203 if (cp->addr.type == BDADDR_BREDR) {
3204 conn->connect_cfm_cb = pairing_complete_cb;
3205 conn->security_cfm_cb = pairing_complete_cb;
3206 conn->disconn_cfm_cb = pairing_complete_cb;
3207 } else {
3208 conn->connect_cfm_cb = le_pairing_complete_cb;
3209 conn->security_cfm_cb = le_pairing_complete_cb;
3210 conn->disconn_cfm_cb = le_pairing_complete_cb;
3213 conn->io_capability = cp->io_cap;
3214 cmd->user_data = conn;
3216 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3217 hci_conn_security(conn, sec_level, auth_type, true))
3218 pairing_complete(cmd, 0);
3220 err = 0;
3222 unlock:
3223 hci_dev_unlock(hdev);
3224 return err;
3227 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3228 u16 len)
3230 struct mgmt_addr_info *addr = data;
3231 struct pending_cmd *cmd;
3232 struct hci_conn *conn;
3233 int err;
3235 BT_DBG("");
3237 hci_dev_lock(hdev);
3239 if (!hdev_is_powered(hdev)) {
3240 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3241 MGMT_STATUS_NOT_POWERED);
3242 goto unlock;
3245 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3246 if (!cmd) {
3247 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3248 MGMT_STATUS_INVALID_PARAMS);
3249 goto unlock;
3252 conn = cmd->user_data;
3254 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3255 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3256 MGMT_STATUS_INVALID_PARAMS);
3257 goto unlock;
3260 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3262 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3263 addr, sizeof(*addr));
3264 unlock:
3265 hci_dev_unlock(hdev);
3266 return err;
3269 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3270 struct mgmt_addr_info *addr, u16 mgmt_op,
3271 u16 hci_op, __le32 passkey)
3273 struct pending_cmd *cmd;
3274 struct hci_conn *conn;
3275 int err;
3277 hci_dev_lock(hdev);
3279 if (!hdev_is_powered(hdev)) {
3280 err = cmd_complete(sk, hdev->id, mgmt_op,
3281 MGMT_STATUS_NOT_POWERED, addr,
3282 sizeof(*addr));
3283 goto done;
3286 if (addr->type == BDADDR_BREDR)
3287 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3288 else
3289 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3291 if (!conn) {
3292 err = cmd_complete(sk, hdev->id, mgmt_op,
3293 MGMT_STATUS_NOT_CONNECTED, addr,
3294 sizeof(*addr));
3295 goto done;
3298 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3299 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3300 if (!err)
3301 err = cmd_complete(sk, hdev->id, mgmt_op,
3302 MGMT_STATUS_SUCCESS, addr,
3303 sizeof(*addr));
3304 else
3305 err = cmd_complete(sk, hdev->id, mgmt_op,
3306 MGMT_STATUS_FAILED, addr,
3307 sizeof(*addr));
3309 goto done;
3312 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3313 if (!cmd) {
3314 err = -ENOMEM;
3315 goto done;
3318 /* Continue with pairing via HCI */
3319 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3320 struct hci_cp_user_passkey_reply cp;
3322 bacpy(&cp.bdaddr, &addr->bdaddr);
3323 cp.passkey = passkey;
3324 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3325 } else
3326 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3327 &addr->bdaddr);
3329 if (err < 0)
3330 mgmt_pending_remove(cmd);
3332 done:
3333 hci_dev_unlock(hdev);
3334 return err;
3337 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3338 void *data, u16 len)
3340 struct mgmt_cp_pin_code_neg_reply *cp = data;
3342 BT_DBG("");
3344 return user_pairing_resp(sk, hdev, &cp->addr,
3345 MGMT_OP_PIN_CODE_NEG_REPLY,
3346 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3349 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3350 u16 len)
3352 struct mgmt_cp_user_confirm_reply *cp = data;
3354 BT_DBG("");
3356 if (len != sizeof(*cp))
3357 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3358 MGMT_STATUS_INVALID_PARAMS);
3360 return user_pairing_resp(sk, hdev, &cp->addr,
3361 MGMT_OP_USER_CONFIRM_REPLY,
3362 HCI_OP_USER_CONFIRM_REPLY, 0);
3365 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3366 void *data, u16 len)
3368 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3370 BT_DBG("");
3372 return user_pairing_resp(sk, hdev, &cp->addr,
3373 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3374 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3377 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3378 u16 len)
3380 struct mgmt_cp_user_passkey_reply *cp = data;
3382 BT_DBG("");
3384 return user_pairing_resp(sk, hdev, &cp->addr,
3385 MGMT_OP_USER_PASSKEY_REPLY,
3386 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3389 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3390 void *data, u16 len)
3392 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3394 BT_DBG("");
3396 return user_pairing_resp(sk, hdev, &cp->addr,
3397 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3398 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3401 static void update_name(struct hci_request *req)
3403 struct hci_dev *hdev = req->hdev;
3404 struct hci_cp_write_local_name cp;
3406 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3408 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3411 static void set_name_complete(struct hci_dev *hdev, u8 status)
3413 struct mgmt_cp_set_local_name *cp;
3414 struct pending_cmd *cmd;
3416 BT_DBG("status 0x%02x", status);
3418 hci_dev_lock(hdev);
3420 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3421 if (!cmd)
3422 goto unlock;
3424 cp = cmd->param;
3426 if (status)
3427 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3428 mgmt_status(status));
3429 else
3430 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3431 cp, sizeof(*cp));
3433 mgmt_pending_remove(cmd);
3435 unlock:
3436 hci_dev_unlock(hdev);
3439 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3440 u16 len)
3442 struct mgmt_cp_set_local_name *cp = data;
3443 struct pending_cmd *cmd;
3444 struct hci_request req;
3445 int err;
3447 BT_DBG("");
3449 hci_dev_lock(hdev);
3451 /* If the old values are the same as the new ones just return a
3452 * direct command complete event.
3454 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3455 !memcmp(hdev->short_name, cp->short_name,
3456 sizeof(hdev->short_name))) {
3457 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3458 data, len);
3459 goto failed;
3462 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3464 if (!hdev_is_powered(hdev)) {
3465 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3467 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3468 data, len);
3469 if (err < 0)
3470 goto failed;
3472 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3473 sk);
3475 goto failed;
3478 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3479 if (!cmd) {
3480 err = -ENOMEM;
3481 goto failed;
3484 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3486 hci_req_init(&req, hdev);
3488 if (lmp_bredr_capable(hdev)) {
3489 update_name(&req);
3490 update_eir(&req);
3493 /* The name is stored in the scan response data and so
3494 * no need to udpate the advertising data here.
3496 if (lmp_le_capable(hdev))
3497 update_scan_rsp_data(&req);
3499 err = hci_req_run(&req, set_name_complete);
3500 if (err < 0)
3501 mgmt_pending_remove(cmd);
3503 failed:
3504 hci_dev_unlock(hdev);
3505 return err;
3508 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3509 void *data, u16 data_len)
3511 struct pending_cmd *cmd;
3512 int err;
3514 BT_DBG("%s", hdev->name);
3516 hci_dev_lock(hdev);
3518 if (!hdev_is_powered(hdev)) {
3519 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3520 MGMT_STATUS_NOT_POWERED);
3521 goto unlock;
3524 if (!lmp_ssp_capable(hdev)) {
3525 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3526 MGMT_STATUS_NOT_SUPPORTED);
3527 goto unlock;
3530 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3531 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3532 MGMT_STATUS_BUSY);
3533 goto unlock;
3536 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3537 if (!cmd) {
3538 err = -ENOMEM;
3539 goto unlock;
3542 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3543 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3544 0, NULL);
3545 else
3546 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3548 if (err < 0)
3549 mgmt_pending_remove(cmd);
3551 unlock:
3552 hci_dev_unlock(hdev);
3553 return err;
3556 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3557 void *data, u16 len)
3559 int err;
3561 BT_DBG("%s ", hdev->name);
3563 hci_dev_lock(hdev);
3565 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3566 struct mgmt_cp_add_remote_oob_data *cp = data;
3567 u8 status;
3569 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3570 cp->hash, cp->randomizer);
3571 if (err < 0)
3572 status = MGMT_STATUS_FAILED;
3573 else
3574 status = MGMT_STATUS_SUCCESS;
3576 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3577 status, &cp->addr, sizeof(cp->addr));
3578 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3579 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3580 u8 status;
3582 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3583 cp->hash192,
3584 cp->randomizer192,
3585 cp->hash256,
3586 cp->randomizer256);
3587 if (err < 0)
3588 status = MGMT_STATUS_FAILED;
3589 else
3590 status = MGMT_STATUS_SUCCESS;
3592 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3593 status, &cp->addr, sizeof(cp->addr));
3594 } else {
3595 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3596 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3597 MGMT_STATUS_INVALID_PARAMS);
3600 hci_dev_unlock(hdev);
3601 return err;
3604 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3605 void *data, u16 len)
3607 struct mgmt_cp_remove_remote_oob_data *cp = data;
3608 u8 status;
3609 int err;
3611 BT_DBG("%s", hdev->name);
3613 hci_dev_lock(hdev);
3615 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3616 if (err < 0)
3617 status = MGMT_STATUS_INVALID_PARAMS;
3618 else
3619 status = MGMT_STATUS_SUCCESS;
3621 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3622 status, &cp->addr, sizeof(cp->addr));
3624 hci_dev_unlock(hdev);
3625 return err;
3628 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3630 struct pending_cmd *cmd;
3631 u8 type;
3632 int err;
3634 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3636 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3637 if (!cmd)
3638 return -ENOENT;
3640 type = hdev->discovery.type;
3642 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3643 &type, sizeof(type));
3644 mgmt_pending_remove(cmd);
3646 return err;
3649 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3651 unsigned long timeout = 0;
3653 BT_DBG("status %d", status);
3655 if (status) {
3656 hci_dev_lock(hdev);
3657 mgmt_start_discovery_failed(hdev, status);
3658 hci_dev_unlock(hdev);
3659 return;
3662 hci_dev_lock(hdev);
3663 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3664 hci_dev_unlock(hdev);
3666 switch (hdev->discovery.type) {
3667 case DISCOV_TYPE_LE:
3668 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3669 break;
3671 case DISCOV_TYPE_INTERLEAVED:
3672 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3673 break;
3675 case DISCOV_TYPE_BREDR:
3676 break;
3678 default:
3679 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3682 if (!timeout)
3683 return;
3685 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3688 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3689 void *data, u16 len)
3691 struct mgmt_cp_start_discovery *cp = data;
3692 struct pending_cmd *cmd;
3693 struct hci_cp_le_set_scan_param param_cp;
3694 struct hci_cp_le_set_scan_enable enable_cp;
3695 struct hci_cp_inquiry inq_cp;
3696 struct hci_request req;
3697 /* General inquiry access code (GIAC) */
3698 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3699 u8 status, own_addr_type;
3700 int err;
3702 BT_DBG("%s", hdev->name);
3704 hci_dev_lock(hdev);
3706 if (!hdev_is_powered(hdev)) {
3707 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3708 MGMT_STATUS_NOT_POWERED);
3709 goto failed;
3712 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3713 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3714 MGMT_STATUS_BUSY);
3715 goto failed;
3718 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3719 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3720 MGMT_STATUS_BUSY);
3721 goto failed;
3724 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3725 if (!cmd) {
3726 err = -ENOMEM;
3727 goto failed;
3730 hdev->discovery.type = cp->type;
3732 hci_req_init(&req, hdev);
3734 switch (hdev->discovery.type) {
3735 case DISCOV_TYPE_BREDR:
3736 status = mgmt_bredr_support(hdev);
3737 if (status) {
3738 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3739 status);
3740 mgmt_pending_remove(cmd);
3741 goto failed;
3744 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3745 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3746 MGMT_STATUS_BUSY);
3747 mgmt_pending_remove(cmd);
3748 goto failed;
3751 hci_inquiry_cache_flush(hdev);
3753 memset(&inq_cp, 0, sizeof(inq_cp));
3754 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3755 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3756 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3757 break;
3759 case DISCOV_TYPE_LE:
3760 case DISCOV_TYPE_INTERLEAVED:
3761 status = mgmt_le_support(hdev);
3762 if (status) {
3763 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3764 status);
3765 mgmt_pending_remove(cmd);
3766 goto failed;
3769 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3770 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3771 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3772 MGMT_STATUS_NOT_SUPPORTED);
3773 mgmt_pending_remove(cmd);
3774 goto failed;
3777 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3778 /* Don't let discovery abort an outgoing
3779 * connection attempt that's using directed
3780 * advertising.
3782 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3783 BT_CONNECT)) {
3784 err = cmd_status(sk, hdev->id,
3785 MGMT_OP_START_DISCOVERY,
3786 MGMT_STATUS_REJECTED);
3787 mgmt_pending_remove(cmd);
3788 goto failed;
3791 disable_advertising(&req);
3794 /* If controller is scanning, it means the background scanning
3795 * is running. Thus, we should temporarily stop it in order to
3796 * set the discovery scanning parameters.
3798 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3799 hci_req_add_le_scan_disable(&req);
3801 memset(&param_cp, 0, sizeof(param_cp));
3803 /* All active scans will be done with either a resolvable
3804 * private address (when privacy feature has been enabled)
3805 * or unresolvable private address.
3807 err = hci_update_random_address(&req, true, &own_addr_type);
3808 if (err < 0) {
3809 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3810 MGMT_STATUS_FAILED);
3811 mgmt_pending_remove(cmd);
3812 goto failed;
3815 param_cp.type = LE_SCAN_ACTIVE;
3816 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3817 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3818 param_cp.own_address_type = own_addr_type;
3819 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3820 &param_cp);
3822 memset(&enable_cp, 0, sizeof(enable_cp));
3823 enable_cp.enable = LE_SCAN_ENABLE;
3824 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3825 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3826 &enable_cp);
3827 break;
3829 default:
3830 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3831 MGMT_STATUS_INVALID_PARAMS);
3832 mgmt_pending_remove(cmd);
3833 goto failed;
3836 err = hci_req_run(&req, start_discovery_complete);
3837 if (err < 0)
3838 mgmt_pending_remove(cmd);
3839 else
3840 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3842 failed:
3843 hci_dev_unlock(hdev);
3844 return err;
3847 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3849 struct pending_cmd *cmd;
3850 int err;
3852 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3853 if (!cmd)
3854 return -ENOENT;
3856 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3857 &hdev->discovery.type, sizeof(hdev->discovery.type));
3858 mgmt_pending_remove(cmd);
3860 return err;
3863 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3865 BT_DBG("status %d", status);
3867 hci_dev_lock(hdev);
3869 if (status) {
3870 mgmt_stop_discovery_failed(hdev, status);
3871 goto unlock;
3874 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3876 unlock:
3877 hci_dev_unlock(hdev);
/* Handler for the MGMT_OP_STOP_DISCOVERY command: abort an ongoing
 * device discovery.  The supplied type must match the discovery that
 * is currently running.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct mgmt_cp_stop_discovery *mgmt_cp = data;
        struct pending_cmd *cmd;
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        /* Nothing to stop if no discovery is in progress */
        if (!hci_discovery_active(hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                   MGMT_STATUS_REJECTED, &mgmt_cp->type,
                                   sizeof(mgmt_cp->type));
                goto unlock;
        }

        /* The caller must name the discovery type actually running */
        if (hdev->discovery.type != mgmt_cp->type) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
                                   sizeof(mgmt_cp->type));
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        hci_req_init(&req, hdev);

        hci_stop_discovery(&req);

        err = hci_req_run(&req, stop_discovery_complete);
        if (!err) {
                /* Commands queued successfully; completion callback will
                 * finish the state transition.
                 */
                hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                goto unlock;
        }

        mgmt_pending_remove(cmd);

        /* If no HCI commands were sent we're done */
        if (err == -ENODATA) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
                                   &mgmt_cp->type, sizeof(mgmt_cp->type));
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        }

unlock:
        hci_dev_unlock(hdev);
        return err;
}
3936 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3937 u16 len)
3939 struct mgmt_cp_confirm_name *cp = data;
3940 struct inquiry_entry *e;
3941 int err;
3943 BT_DBG("%s", hdev->name);
3945 hci_dev_lock(hdev);
3947 if (!hci_discovery_active(hdev)) {
3948 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3949 MGMT_STATUS_FAILED, &cp->addr,
3950 sizeof(cp->addr));
3951 goto failed;
3954 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3955 if (!e) {
3956 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3957 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3958 sizeof(cp->addr));
3959 goto failed;
3962 if (cp->name_known) {
3963 e->name_state = NAME_KNOWN;
3964 list_del(&e->list);
3965 } else {
3966 e->name_state = NAME_NEEDED;
3967 hci_inquiry_cache_update_resolve(hdev, e);
3970 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3971 sizeof(cp->addr));
3973 failed:
3974 hci_dev_unlock(hdev);
3975 return err;
3978 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3979 u16 len)
3981 struct mgmt_cp_block_device *cp = data;
3982 u8 status;
3983 int err;
3985 BT_DBG("%s", hdev->name);
3987 if (!bdaddr_type_is_valid(cp->addr.type))
3988 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3989 MGMT_STATUS_INVALID_PARAMS,
3990 &cp->addr, sizeof(cp->addr));
3992 hci_dev_lock(hdev);
3994 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3995 cp->addr.type);
3996 if (err < 0) {
3997 status = MGMT_STATUS_FAILED;
3998 goto done;
4001 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4002 sk);
4003 status = MGMT_STATUS_SUCCESS;
4005 done:
4006 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4007 &cp->addr, sizeof(cp->addr));
4009 hci_dev_unlock(hdev);
4011 return err;
4014 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4015 u16 len)
4017 struct mgmt_cp_unblock_device *cp = data;
4018 u8 status;
4019 int err;
4021 BT_DBG("%s", hdev->name);
4023 if (!bdaddr_type_is_valid(cp->addr.type))
4024 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4025 MGMT_STATUS_INVALID_PARAMS,
4026 &cp->addr, sizeof(cp->addr));
4028 hci_dev_lock(hdev);
4030 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4031 cp->addr.type);
4032 if (err < 0) {
4033 status = MGMT_STATUS_INVALID_PARAMS;
4034 goto done;
4037 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4038 sk);
4039 status = MGMT_STATUS_SUCCESS;
4041 done:
4042 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4043 &cp->addr, sizeof(cp->addr));
4045 hci_dev_unlock(hdev);
4047 return err;
4050 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4051 u16 len)
4053 struct mgmt_cp_set_device_id *cp = data;
4054 struct hci_request req;
4055 int err;
4056 __u16 source;
4058 BT_DBG("%s", hdev->name);
4060 source = __le16_to_cpu(cp->source);
4062 if (source > 0x0002)
4063 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4064 MGMT_STATUS_INVALID_PARAMS);
4066 hci_dev_lock(hdev);
4068 hdev->devid_source = source;
4069 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4070 hdev->devid_product = __le16_to_cpu(cp->product);
4071 hdev->devid_version = __le16_to_cpu(cp->version);
4073 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4075 hci_req_init(&req, hdev);
4076 update_eir(&req);
4077 hci_req_run(&req, NULL);
4079 hci_dev_unlock(hdev);
4081 return err;
4084 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4086 struct cmd_lookup match = { NULL, hdev };
4088 if (status) {
4089 u8 mgmt_err = mgmt_status(status);
4091 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4092 cmd_status_rsp, &mgmt_err);
4093 return;
4096 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4097 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4098 else
4099 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4101 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4102 &match);
4104 new_settings(hdev, match.sk);
4106 if (match.sk)
4107 sock_put(match.sk);
/* Handler for the MGMT_OP_SET_ADVERTISING command: toggle LE
 * advertising, either by flipping the setting directly or by issuing
 * the necessary HCI commands when the controller is active.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 len)
{
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
        struct hci_request req;
        u8 val, enabled, status;
        int err;

        BT_DBG("request for %s", hdev->name);

        status = mgmt_le_support(hdev);
        if (status)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                                  status);

        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                                  MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        val = !!cp->val;
        enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

        /* The following conditions are ones which mean that we should
         * not do any HCI communication but directly send a mgmt
         * response to user space (after toggling the flag if
         * necessary).
         */
        if (!hdev_is_powered(hdev) || val == enabled ||
            hci_conn_num(hdev, LE_LINK) > 0 ||
            (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
             hdev->le_scan_type == LE_SCAN_ACTIVE)) {
                bool changed = false;

                if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
                        change_bit(HCI_ADVERTISING, &hdev->dev_flags);
                        changed = true;
                }

                err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
                if (err < 0)
                        goto unlock;

                if (changed)
                        err = new_settings(hdev, sk);

                goto unlock;
        }

        /* Refuse when another command is already reconfiguring LE state */
        if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
            mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                                 MGMT_STATUS_BUSY);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        hci_req_init(&req, hdev);

        if (val)
                enable_advertising(&req);
        else
                disable_advertising(&req);

        err = hci_req_run(&req, set_advertising_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
4190 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4191 void *data, u16 len)
4193 struct mgmt_cp_set_static_address *cp = data;
4194 int err;
4196 BT_DBG("%s", hdev->name);
4198 if (!lmp_le_capable(hdev))
4199 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4200 MGMT_STATUS_NOT_SUPPORTED);
4202 if (hdev_is_powered(hdev))
4203 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4204 MGMT_STATUS_REJECTED);
4206 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4207 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4208 return cmd_status(sk, hdev->id,
4209 MGMT_OP_SET_STATIC_ADDRESS,
4210 MGMT_STATUS_INVALID_PARAMS);
4212 /* Two most significant bits shall be set */
4213 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4214 return cmd_status(sk, hdev->id,
4215 MGMT_OP_SET_STATIC_ADDRESS,
4216 MGMT_STATUS_INVALID_PARAMS);
4219 hci_dev_lock(hdev);
4221 bacpy(&hdev->static_addr, &cp->bdaddr);
4223 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4225 hci_dev_unlock(hdev);
4227 return err;
4230 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4231 void *data, u16 len)
4233 struct mgmt_cp_set_scan_params *cp = data;
4234 __u16 interval, window;
4235 int err;
4237 BT_DBG("%s", hdev->name);
4239 if (!lmp_le_capable(hdev))
4240 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4241 MGMT_STATUS_NOT_SUPPORTED);
4243 interval = __le16_to_cpu(cp->interval);
4245 if (interval < 0x0004 || interval > 0x4000)
4246 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4247 MGMT_STATUS_INVALID_PARAMS);
4249 window = __le16_to_cpu(cp->window);
4251 if (window < 0x0004 || window > 0x4000)
4252 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4253 MGMT_STATUS_INVALID_PARAMS);
4255 if (window > interval)
4256 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4257 MGMT_STATUS_INVALID_PARAMS);
4259 hci_dev_lock(hdev);
4261 hdev->le_scan_interval = interval;
4262 hdev->le_scan_window = window;
4264 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4266 /* If background scan is running, restart it so new parameters are
4267 * loaded.
4269 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4270 hdev->discovery.state == DISCOVERY_STOPPED) {
4271 struct hci_request req;
4273 hci_req_init(&req, hdev);
4275 hci_req_add_le_scan_disable(&req);
4276 hci_req_add_le_passive_scan(&req);
4278 hci_req_run(&req, NULL);
4281 hci_dev_unlock(hdev);
4283 return err;
/* HCI request callback for Set Fast Connectable: answer the pending
 * mgmt command and, on success, update the HCI_FAST_CONNECTABLE flag
 * to the requested value.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
        struct pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
        if (!cmd)
                goto unlock;

        if (status) {
                cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                           mgmt_status(status));
        } else {
                /* The requested mode was stashed in the pending command */
                struct mgmt_mode *cp = cmd->param;

                if (cp->val)
                        set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
                else
                        clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

                send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
                new_settings(hdev, cmd->sk);
        }

        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
/* Handler for the MGMT_OP_SET_FAST_CONNECTABLE command: switch page
 * scan parameters between standard and fast-connectable mode.  Needs
 * BR/EDR enabled, a powered controller and the Connectable setting.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
                                void *data, u16 len)
{
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        /* Fast connectable needs BR/EDR and page scan parameter support
         * (Bluetooth 1.2 or later).
         */
        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
            hdev->hci_ver < BLUETOOTH_VER_1_2)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_NOT_SUPPORTED);

        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_INVALID_PARAMS);

        if (!hdev_is_powered(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_NOT_POWERED);

        if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_REJECTED);

        hci_dev_lock(hdev);

        if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                 MGMT_STATUS_BUSY);
                goto unlock;
        }

        /* Already in the requested mode: just confirm current settings */
        if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
                                        hdev);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
                               data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        hci_req_init(&req, hdev);

        write_fast_connectable(&req, cp->val);

        err = hci_req_run(&req, fast_connectable_complete);
        if (err < 0) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                 MGMT_STATUS_FAILED);
                mgmt_pending_remove(cmd);
        }

unlock:
        hci_dev_unlock(hdev);

        return err;
}
4384 static void set_bredr_scan(struct hci_request *req)
4386 struct hci_dev *hdev = req->hdev;
4387 u8 scan = 0;
4389 /* Ensure that fast connectable is disabled. This function will
4390 * not do anything if the page scan parameters are already what
4391 * they should be.
4393 write_fast_connectable(req, false);
4395 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4396 !list_empty(&hdev->whitelist))
4397 scan |= SCAN_PAGE;
4398 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4399 scan |= SCAN_INQUIRY;
4401 if (scan)
4402 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request callback for Set BR/EDR: answer the pending mgmt
 * command, rolling back the HCI_BREDR_ENABLED flag on failure since it
 * was flipped optimistically before the request was sent.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
        struct pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
        if (!cmd)
                goto unlock;

        if (status) {
                u8 mgmt_err = mgmt_status(status);

                /* We need to restore the flag if related HCI commands
                 * failed.
                 */
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

                cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
        } else {
                send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
                new_settings(hdev, cmd->sk);
        }

        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
/* Handler for the MGMT_OP_SET_BREDR command: enable or disable BR/EDR
 * on a dual-mode controller.  Disabling is only allowed while powered
 * off; enabling while powered on also refreshes the advertising data.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
        struct hci_request req;
        int err;

        BT_DBG("request for %s", hdev->name);

        /* Only meaningful on dual-mode (BR/EDR + LE) controllers */
        if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                  MGMT_STATUS_NOT_SUPPORTED);

        /* LE must stay enabled, so BR/EDR can only be toggled when LE is on */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                  MGMT_STATUS_REJECTED);

        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                  MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
                goto unlock;
        }

        if (!hdev_is_powered(hdev)) {
                /* Disabling BR/EDR clears all BR/EDR-only settings */
                if (!cp->val) {
                        clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
                        clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
                        clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
                        clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
                        clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
                }

                change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

                err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
                if (err < 0)
                        goto unlock;

                err = new_settings(hdev, sk);
                goto unlock;
        }

        /* Reject disabling when powered on */
        if (!cp->val) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                 MGMT_STATUS_REJECTED);
                goto unlock;
        }

        if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                 MGMT_STATUS_BUSY);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        /* We need to flip the bit already here so that update_adv_data
         * generates the correct flags.
         */
        set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        hci_req_init(&req, hdev);

        if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
            !list_empty(&hdev->whitelist))
                set_bredr_scan(&req);

        /* Since only the advertising data flags will change, there
         * is no need to update the scan response data.
         */
        update_adv_data(&req);

        err = hci_req_run(&req, set_bredr_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for the MGMT_OP_SET_SECURE_CONN command: configure Secure
 * Connections support.  Value 0x00 disables, 0x01 enables, and 0x02
 * enables SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
        u8 val, status;
        int err;

        BT_DBG("request for %s", hdev->name);

        status = mgmt_bredr_support(hdev);
        if (status)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  status);

        /* Allow SC when the controller supports it, or when it is
         * forced via the debugfs flag.
         */
        if (!lmp_sc_capable(hdev) &&
            !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_NOT_SUPPORTED);

        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                /* Powered off: only the flags need toggling */
                bool changed;

                if (cp->val) {
                        changed = !test_and_set_bit(HCI_SC_ENABLED,
                                                    &hdev->dev_flags);
                        if (cp->val == 0x02)
                                set_bit(HCI_SC_ONLY, &hdev->dev_flags);
                        else
                                clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
                } else {
                        changed = test_and_clear_bit(HCI_SC_ENABLED,
                                                     &hdev->dev_flags);
                        clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
                }

                err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
                if (err < 0)
                        goto failed;

                if (changed)
                        err = new_settings(hdev, sk);

                goto failed;
        }

        if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                 MGMT_STATUS_BUSY);
                goto failed;
        }

        val = !!cp->val;

        /* Already in the requested state: just confirm settings */
        if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
            (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
        if (err < 0) {
                mgmt_pending_remove(cmd);
                goto failed;
        }

        /* NOTE(review): HCI_SC_ONLY is updated before the command
         * completes; presumably the completion path handles failure.
         */
        if (cp->val == 0x02)
                set_bit(HCI_SC_ONLY, &hdev->dev_flags);
        else
                clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for the MGMT_OP_SET_DEBUG_KEYS command: 0x00 discards debug
 * keys, 0x01 keeps them, 0x02 additionally switches the controller
 * into SSP debug mode so it generates debug keys itself.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
                          void *data, u16 len)
{
        struct mgmt_mode *cp = data;
        bool changed, use_changed;
        int err;

        BT_DBG("request for %s", hdev->name);

        if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
                                  MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        /* 0x01 and 0x02 both mean debug keys are retained */
        if (cp->val)
                changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
                                            &hdev->dev_flags);
        else
                changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
                                             &hdev->dev_flags);

        /* Only 0x02 enables active use of debug keys */
        if (cp->val == 0x02)
                use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
                                                &hdev->dev_flags);
        else
                use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
                                                 &hdev->dev_flags);

        /* Tell the controller to toggle SSP debug mode when the usage
         * changed and SSP is active on a powered controller.
         */
        if (hdev_is_powered(hdev) && use_changed &&
            test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
                hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
                             sizeof(mode), &mode);
        }

        err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
        if (err < 0)
                goto unlock;

        if (changed)
                err = new_settings(hdev, sk);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for the MGMT_OP_SET_PRIVACY command: enable/disable LE
 * privacy and store the Identity Resolving Key.  Only allowed while
 * the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                       u16 len)
{
        struct mgmt_cp_set_privacy *cp = cp_data;
        bool changed;
        int err;

        BT_DBG("request for %s", hdev->name);

        if (!lmp_le_capable(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                                  MGMT_STATUS_NOT_SUPPORTED);

        if (cp->privacy != 0x00 && cp->privacy != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                                  MGMT_STATUS_INVALID_PARAMS);

        if (hdev_is_powered(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                                  MGMT_STATUS_REJECTED);

        hci_dev_lock(hdev);

        /* If user space supports this command it is also expected to
         * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
         */
        set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

        if (cp->privacy) {
                changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
                memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
                /* Force generation of a fresh RPA from the new IRK */
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
        } else {
                changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
                memset(hdev->irk, 0, sizeof(hdev->irk));
                clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
        }

        err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
        if (err < 0)
                goto unlock;

        if (changed)
                err = new_settings(hdev, sk);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
4714 static bool irk_is_valid(struct mgmt_irk_info *irk)
4716 switch (irk->addr.type) {
4717 case BDADDR_LE_PUBLIC:
4718 return true;
4720 case BDADDR_LE_RANDOM:
4721 /* Two most significant bits shall be set */
4722 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4723 return false;
4724 return true;
4727 return false;
/* Handler for the MGMT_OP_LOAD_IRKS command: replace the whole set of
 * stored Identity Resolving Keys with the list supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                     u16 len)
{
        struct mgmt_cp_load_irks *cp = cp_data;
        /* Upper bound that keeps the total payload within a u16 length */
        const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
                                   sizeof(struct mgmt_irk_info));
        u16 irk_count, expected_len;
        int i, err;

        BT_DBG("request for %s", hdev->name);

        if (!lmp_le_capable(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
                                  MGMT_STATUS_NOT_SUPPORTED);

        irk_count = __le16_to_cpu(cp->irk_count);
        if (irk_count > max_irk_count) {
                BT_ERR("load_irks: too big irk_count value %u", irk_count);
                return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
                                  MGMT_STATUS_INVALID_PARAMS);
        }

        /* The declared count must match the actual payload length */
        expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
        if (expected_len != len) {
                BT_ERR("load_irks: expected %u bytes, got %u bytes",
                       expected_len, len);
                return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
                                  MGMT_STATUS_INVALID_PARAMS);
        }

        BT_DBG("%s irk_count %u", hdev->name, irk_count);

        /* Validate every entry before touching the stored keys */
        for (i = 0; i < irk_count; i++) {
                struct mgmt_irk_info *key = &cp->irks[i];

                if (!irk_is_valid(key))
                        return cmd_status(sk, hdev->id,
                                          MGMT_OP_LOAD_IRKS,
                                          MGMT_STATUS_INVALID_PARAMS);
        }

        hci_dev_lock(hdev);

        hci_smp_irks_clear(hdev);

        for (i = 0; i < irk_count; i++) {
                struct mgmt_irk_info *irk = &cp->irks[i];
                u8 addr_type;

                if (irk->addr.type == BDADDR_LE_PUBLIC)
                        addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        addr_type = ADDR_LE_DEV_RANDOM;

                hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
                            BDADDR_ANY);
        }

        set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

        err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

        hci_dev_unlock(hdev);

        return err;
}
4797 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4799 if (key->master != 0x00 && key->master != 0x01)
4800 return false;
4802 switch (key->addr.type) {
4803 case BDADDR_LE_PUBLIC:
4804 return true;
4806 case BDADDR_LE_RANDOM:
4807 /* Two most significant bits shall be set */
4808 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4809 return false;
4810 return true;
4813 return false;
4816 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4817 void *cp_data, u16 len)
4819 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4820 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4821 sizeof(struct mgmt_ltk_info));
4822 u16 key_count, expected_len;
4823 int i, err;
4825 BT_DBG("request for %s", hdev->name);
4827 if (!lmp_le_capable(hdev))
4828 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4829 MGMT_STATUS_NOT_SUPPORTED);
4831 key_count = __le16_to_cpu(cp->key_count);
4832 if (key_count > max_key_count) {
4833 BT_ERR("load_ltks: too big key_count value %u", key_count);
4834 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4835 MGMT_STATUS_INVALID_PARAMS);
4838 expected_len = sizeof(*cp) + key_count *
4839 sizeof(struct mgmt_ltk_info);
4840 if (expected_len != len) {
4841 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4842 expected_len, len);
4843 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4844 MGMT_STATUS_INVALID_PARAMS);
4847 BT_DBG("%s key_count %u", hdev->name, key_count);
4849 for (i = 0; i < key_count; i++) {
4850 struct mgmt_ltk_info *key = &cp->keys[i];
4852 if (!ltk_is_valid(key))
4853 return cmd_status(sk, hdev->id,
4854 MGMT_OP_LOAD_LONG_TERM_KEYS,
4855 MGMT_STATUS_INVALID_PARAMS);
4858 hci_dev_lock(hdev);
4860 hci_smp_ltks_clear(hdev);
4862 for (i = 0; i < key_count; i++) {
4863 struct mgmt_ltk_info *key = &cp->keys[i];
4864 u8 type, addr_type, authenticated;
4866 if (key->addr.type == BDADDR_LE_PUBLIC)
4867 addr_type = ADDR_LE_DEV_PUBLIC;
4868 else
4869 addr_type = ADDR_LE_DEV_RANDOM;
4871 if (key->master)
4872 type = SMP_LTK;
4873 else
4874 type = SMP_LTK_SLAVE;
4876 switch (key->type) {
4877 case MGMT_LTK_UNAUTHENTICATED:
4878 authenticated = 0x00;
4879 break;
4880 case MGMT_LTK_AUTHENTICATED:
4881 authenticated = 0x01;
4882 break;
4883 default:
4884 continue;
4887 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4888 authenticated, key->val, key->enc_size, key->ediv,
4889 key->rand);
4892 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4893 NULL, 0);
4895 hci_dev_unlock(hdev);
4897 return err;
/* Context passed to get_conn_info_complete() to match pending Get
 * Connection Information commands against the connection whose
 * RSSI/TX-power refresh just finished.
 */
struct cmd_conn_lookup {
        struct hci_conn *conn;
        /* true when the TX power values read from the controller are usable */
        bool valid_tx_power;
        u8 mgmt_status;
};
/* mgmt_pending_foreach() callback: complete one pending Get Connection
 * Information command if it refers to the connection in @data, using
 * the freshly cached RSSI/TX power values.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
        struct cmd_conn_lookup *match = data;
        struct mgmt_cp_get_conn_info *cp;
        struct mgmt_rp_get_conn_info rp;
        struct hci_conn *conn = cmd->user_data;

        /* Only answer commands for the connection that was refreshed */
        if (conn != match->conn)
                return;

        cp = (struct mgmt_cp_get_conn_info *) cmd->param;

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!match->mgmt_status) {
                rp.rssi = conn->rssi;

                if (match->valid_tx_power) {
                        rp.tx_power = conn->tx_power;
                        rp.max_tx_power = conn->max_tx_power;
                } else {
                        rp.tx_power = HCI_TX_POWER_INVALID;
                        rp.max_tx_power = HCI_TX_POWER_INVALID;
                }
        }

        cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
                     match->mgmt_status, &rp, sizeof(rp));

        /* Drop the reference taken when the command was queued */
        hci_conn_drop(conn);

        mgmt_pending_remove(cmd);
}
/* HCI request callback for the Read RSSI / Read TX Power request built
 * by get_conn_info(): find the affected connection and answer all
 * pending Get Connection Information commands for it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
        struct hci_cp_read_rssi *cp;
        struct hci_conn *conn;
        struct cmd_conn_lookup match;
        u16 handle;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        /* TX power data is valid in case request completed successfully,
         * otherwise we assume it's not valid. At the moment we assume that
         * either both or none of current and max values are valid to keep code
         * simple.
         */
        match.valid_tx_power = !status;

        /* Commands sent in request are either Read RSSI or Read Transmit Power
         * Level so we check which one was last sent to retrieve connection
         * handle. Both commands have handle as first parameter so it's safe to
         * cast data on the same command struct.
         *
         * First command sent is always Read RSSI and we fail only if it fails.
         * In other case we simply override error to indicate success as we
         * already remembered if TX power value is actually valid.
         */
        cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
        if (!cp) {
                cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
                status = 0;
        }

        if (!cp) {
                BT_ERR("invalid sent_cmd in response");
                goto unlock;
        }

        handle = __le16_to_cpu(cp->handle);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        if (!conn) {
                BT_ERR("unknown handle (%d) in response", handle);
                goto unlock;
        }

        match.conn = conn;
        match.mgmt_status = mgmt_status(status);

        /* Cache refresh is complete, now reply for mgmt request for given
         * connection only.
         */
        mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
                             get_conn_info_complete, &match);

unlock:
        hci_dev_unlock(hdev);
}
/* Handler for the MGMT_OP_GET_CONN_INFO command: report RSSI and TX
 * power for a connection, refreshing the cached values from the
 * controller when they are older than a randomized age threshold.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 len)
{
        struct mgmt_cp_get_conn_info *cp = data;
        struct mgmt_rp_get_conn_info rp;
        struct hci_conn *conn;
        unsigned long conn_info_age;
        int err = 0;

        BT_DBG("%s", hdev->name);

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                                   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
                goto unlock;
        }

        if (cp->addr.type == BDADDR_BREDR)
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                               &cp->addr.bdaddr);
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

        if (!conn || conn->state != BT_CONNECTED) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                                   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
                goto unlock;
        }

        /* To avoid client trying to guess when to poll again for information we
         * calculate conn info age as random value between min/max set in hdev.
         */
        conn_info_age = hdev->conn_info_min_age +
                        prandom_u32_max(hdev->conn_info_max_age -
                                        hdev->conn_info_min_age);

        /* Query controller to refresh cached values if they are too old or were
         * never read.
         */
        if (time_after(jiffies, conn->conn_info_timestamp +
                       msecs_to_jiffies(conn_info_age)) ||
            !conn->conn_info_timestamp) {
                struct hci_request req;
                struct hci_cp_read_tx_power req_txp_cp;
                struct hci_cp_read_rssi req_rssi_cp;
                struct pending_cmd *cmd;

                hci_req_init(&req, hdev);
                req_rssi_cp.handle = cpu_to_le16(conn->handle);
                hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
                            &req_rssi_cp);

                /* For LE links TX power does not change thus we don't need to
                 * query for it once value is known.
                 */
                if (!bdaddr_type_is_le(cp->addr.type) ||
                    conn->tx_power == HCI_TX_POWER_INVALID) {
                        req_txp_cp.handle = cpu_to_le16(conn->handle);
                        req_txp_cp.type = 0x00;
                        hci_req_add(&req, HCI_OP_READ_TX_POWER,
                                    sizeof(req_txp_cp), &req_txp_cp);
                }

                /* Max TX power needs to be read only once per connection */
                if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
                        req_txp_cp.handle = cpu_to_le16(conn->handle);
                        req_txp_cp.type = 0x01;
                        hci_req_add(&req, HCI_OP_READ_TX_POWER,
                                    sizeof(req_txp_cp), &req_txp_cp);
                }

                err = hci_req_run(&req, conn_info_refresh_complete);
                if (err < 0)
                        goto unlock;

                cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
                                       data, len);
                if (!cmd) {
                        err = -ENOMEM;
                        goto unlock;
                }

                /* Hold the connection until the completion callback
                 * answers the command and drops the reference.
                 */
                hci_conn_hold(conn);
                cmd->user_data = conn;

                conn->conn_info_timestamp = jiffies;
        } else {
                /* Cache is valid, just reply with values cached in hci_conn */
                rp.rssi = conn->rssi;
                rp.tx_power = conn->tx_power;
                rp.max_tx_power = conn->max_tx_power;

                err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
        }

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Completion callback for the Get Clock Information HCI request.
 *
 * Matches the finished HCI_OP_READ_CLOCK command back to the pending
 * MGMT_OP_GET_CLOCK_INFO command and sends the Management reply.
 * Runs with no locks held; takes hdev->lock itself.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Recover the parameters of the READ_CLOCK command that just
	 * completed; without them we cannot tell which clock was read.
	 */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested; which == 0 is the local clock (no connection).
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered with conn as its user data
	 * (see get_clock_info), so look it up by the same key.
	 */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure still reply, but with zeroed clock values */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken by hci_conn_hold() in get_clock_info */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT_OP_GET_CLOCK_INFO command.
 *
 * Queues an HCI Read Clock request for the local clock and, when a
 * peer address is given, a second one for that connection's piconet
 * clock. The reply is sent from get_clock_info_complete().
 *
 * Returns 0 or a negative errno; Management status errors are
 * reported to the socket via cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a specific connection whose piconet
	 * clock should be reported in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* First request: local clock (handle 0, which 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the request completes; it is
		 * dropped in get_clock_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		/* Second request: piconet clock for this connection */
		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5238 /* Helper for Add/Remove Device commands */
5239 static void update_page_scan(struct hci_dev *hdev, u8 scan)
5241 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5242 return;
5244 if (!hdev_is_powered(hdev))
5245 return;
5247 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5248 * make any changes to page scanning.
5250 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5251 return;
5253 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5254 scan |= SCAN_INQUIRY;
5256 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5259 static void device_added(struct sock *sk, struct hci_dev *hdev,
5260 bdaddr_t *bdaddr, u8 type, u8 action)
5262 struct mgmt_ev_device_added ev;
5264 bacpy(&ev.addr.bdaddr, bdaddr);
5265 ev.addr.type = type;
5266 ev.action = action;
5268 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handle the MGMT_OP_ADD_DEVICE command.
 *
 * For BR/EDR addresses the device is added to the controller whitelist
 * (only the "allow incoming connection" action, 0x01, is supported).
 * For LE addresses the action is mapped to an auto-connect policy on
 * the device's connection parameters.
 *
 * action: 0x00 = background scan/report, 0x01 = allow incoming /
 * direct connect, 0x02 = auto-connect always.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		bool update_scan;

		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Page scan only needs enabling when the whitelist goes
		 * from empty to non-empty.
		 */
		update_scan = list_empty(&hdev->whitelist);

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		if (update_scan)
			update_page_scan(hdev, SCAN_PAGE);

		goto added;
	}

	/* LE address: translate mgmt address type to HCI address type */
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5351 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5352 bdaddr_t *bdaddr, u8 type)
5354 struct mgmt_ev_device_removed ev;
5356 bacpy(&ev.addr.bdaddr, bdaddr);
5357 ev.addr.type = type;
5359 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handle the MGMT_OP_REMOVE_DEVICE command.
 *
 * A specific address removes that single device: BR/EDR addresses are
 * taken off the whitelist, LE addresses have their connection
 * parameters deleted (unless auto-connect is disabled, which means the
 * entry was not added via Add Device and is rejected). BDADDR_ANY with
 * address type 0 clears the whole whitelist and all LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove one specific device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			/* Last whitelist entry gone: page scan no longer
			 * needed on behalf of Add Device.
			 */
			if (list_empty(&hdev->whitelist))
				update_page_scan(hdev, SCAN_DISABLED);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		/* LE address: translate mgmt type to HCI address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added via Add Device */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* For the wildcard address only type 0 is valid */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		update_page_scan(hdev, SCAN_DISABLED);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Keep entries not created via Add Device */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT_OP_LOAD_CONN_PARAM command.
 *
 * Replaces the stored LE connection parameters with the supplied list.
 * Invalid individual entries are skipped (logged, not fatal); a
 * malformed overall payload is rejected with INVALID_PARAMS.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count such that the expected length
	 * computation below cannot overflow u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate of disabled entries */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
/* Handle the MGMT_OP_SET_EXTERNAL_CONFIG command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Only allowed while powered off. If
 * the change flips the controller between the configured and
 * unconfigured states, the mgmt index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the UNCONFIGURED flag no longer matches the actual
	 * configuration state, the index must move between the
	 * configured and unconfigured lists.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Was unconfigured, now configured: power the
			 * controller up in the config stage.
			 */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Was configured, now unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT_OP_SET_PUBLIC_ADDRESS command.
 *
 * Stores a public address for controllers that provide a set_bdaddr
 * driver callback. Only allowed while powered off. If this completes
 * the controller's configuration, the index is moved from the
 * unconfigured to the configured list and power-on is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a usable public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The driver must support programming the address */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Configuration is now complete: switch the index over
		 * and bring the controller up in the config stage.
		 */
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Dispatch table for Management commands.
 *
 * Indexed directly by the MGMT opcode (see mgmt.h); the array position
 * of each entry therefore matters and must match the opcode value.
 * var_len marks commands whose payload may be longer than data_len
 * (data_len is then the minimum); fixed-length commands must match
 * data_len exactly. Checked in mgmt_control().
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
/* Entry point for Management messages received on an HCI control
 * socket.
 *
 * Copies the message from the iovec, validates the header, resolves
 * the controller index, performs per-opcode sanity checks (index
 * required or forbidden, unconfigured-controller restrictions,
 * payload length) and finally dispatches to mgmt_handlers[].
 *
 * Returns the number of consumed bytes on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers in setup/config or claimed by a user
		 * channel are not addressable via mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only the commands
		 * needed to complete their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len commands need at least data_len bytes, fixed-length
	 * commands exactly data_len bytes.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5843 void mgmt_index_added(struct hci_dev *hdev)
5845 if (hdev->dev_type != HCI_BREDR)
5846 return;
5848 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5849 return;
5851 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5852 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5853 else
5854 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5857 void mgmt_index_removed(struct hci_dev *hdev)
5859 u8 status = MGMT_STATUS_INVALID_INDEX;
5861 if (hdev->dev_type != HCI_BREDR)
5862 return;
5864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5865 return;
5867 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5869 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5870 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5871 else
5872 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every stored LE connection parameter entry into the
	 * pending-connections or pending-reports list according to its
	 * auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	/* Restart scanning now that the action lists are rebuilt */
	hci_update_background_scan(hdev);
}
5902 static void powered_complete(struct hci_dev *hdev, u8 status)
5904 struct cmd_lookup match = { NULL, hdev };
5906 BT_DBG("status 0x%02x", status);
5908 hci_dev_lock(hdev);
5910 restart_le_actions(hdev);
5912 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5914 new_settings(hdev, match.sk);
5916 hci_dev_unlock(hdev);
5918 if (match.sk)
5919 sock_put(match.sk);
/* Build and run the HCI request that synchronises the controller with
 * the current mgmt settings after powering on: SSP mode, LE host
 * support, advertising data, authentication, scan mode, class, name
 * and EIR.
 *
 * Returns the hci_req_run() result; 0 means the request was queued
 * and powered_complete() will be called.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication setting with mgmt's
	 * link security setting.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
/* Notify the Management interface about a power state change.
 *
 * On power-up, powered_update_hci() is tried first; if it queues work
 * (returns 0) the rest is handled in powered_complete(). On power-down
 * (or when no HCI update was needed) pending commands are answered and
 * New Settings is broadcast directly.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means the request was queued; powered_complete()
		 * will do the pending-command and settings handling.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Fail every other pending command now that we are powered off */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* If a class of device was set, report it reverting to zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6017 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6019 struct pending_cmd *cmd;
6020 u8 status;
6022 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6023 if (!cmd)
6024 return;
6026 if (err == -ERFKILL)
6027 status = MGMT_STATUS_RFKILLED;
6028 else
6029 status = MGMT_STATUS_FAILED;
6031 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6033 mgmt_pending_remove(cmd);
/* Timer callback fired when the discoverable period expires: clear the
 * discoverable flags, restore plain page scan (BR/EDR), refresh the
 * class and advertising data and broadcast New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6067 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6068 bool persistent)
6070 struct mgmt_ev_new_link_key ev;
6072 memset(&ev, 0, sizeof(ev));
6074 ev.store_hint = persistent;
6075 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6076 ev.key.addr.type = BDADDR_BREDR;
6077 ev.key.type = key->type;
6078 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6079 ev.key.pin_len = key->pin_len;
6081 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6084 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6086 if (ltk->authenticated)
6087 return MGMT_LTK_AUTHENTICATED;
6089 return MGMT_LTK_UNAUTHENTICATED;
6092 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6094 struct mgmt_ev_new_long_term_key ev;
6096 memset(&ev, 0, sizeof(ev));
6098 /* Devices using resolvable or non-resolvable random addresses
6099 * without providing an indentity resolving key don't require
6100 * to store long term keys. Their addresses will change the
6101 * next time around.
6103 * Only when a remote device provides an identity address
6104 * make sure the long term key is stored. If the remote
6105 * identity is known, the long term keys are internally
6106 * mapped to the identity address. So allow static random
6107 * and public addresses here.
6109 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6110 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6111 ev.store_hint = 0x00;
6112 else
6113 ev.store_hint = persistent;
6115 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6116 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6117 ev.key.type = mgmt_ltk_type(key);
6118 ev.key.enc_size = key->enc_size;
6119 ev.key.ediv = key->ediv;
6120 ev.key.rand = key->rand;
6122 if (key->type == SMP_LTK)
6123 ev.key.master = 1;
6125 memcpy(ev.key.val, key->val, sizeof(key->val));
6127 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6130 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6132 struct mgmt_ev_new_irk ev;
6134 memset(&ev, 0, sizeof(ev));
6136 /* For identity resolving keys from devices that are already
6137 * using a public address or static random address, do not
6138 * ask for storing this key. The identity resolving key really
6139 * is only mandatory for devices using resovlable random
6140 * addresses.
6142 * Storing all identity resolving keys has the downside that
6143 * they will be also loaded on next boot of they system. More
6144 * identity resolving keys, means more time during scanning is
6145 * needed to actually resolve these addresses.
6147 if (bacmp(&irk->rpa, BDADDR_ANY))
6148 ev.store_hint = 0x01;
6149 else
6150 ev.store_hint = 0x00;
6152 bacpy(&ev.rpa, &irk->rpa);
6153 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6154 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6155 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6157 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6160 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6161 bool persistent)
6163 struct mgmt_ev_new_csrk ev;
6165 memset(&ev, 0, sizeof(ev));
6167 /* Devices using resolvable or non-resolvable random addresses
6168 * without providing an indentity resolving key don't require
6169 * to store signature resolving keys. Their addresses will change
6170 * the next time around.
6172 * Only when a remote device provides an identity address
6173 * make sure the signature resolving key is stored. So allow
6174 * static random and public addresses here.
6176 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6177 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6178 ev.store_hint = 0x00;
6179 else
6180 ev.store_hint = persistent;
6182 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6183 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6184 ev.key.master = csrk->master;
6185 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6187 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6190 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6191 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6192 u16 max_interval, u16 latency, u16 timeout)
6194 struct mgmt_ev_new_conn_param ev;
6196 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6197 return;
6199 memset(&ev, 0, sizeof(ev));
6200 bacpy(&ev.addr.bdaddr, bdaddr);
6201 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6202 ev.store_hint = store_hint;
6203 ev.min_interval = cpu_to_le16(min_interval);
6204 ev.max_interval = cpu_to_le16(max_interval);
6205 ev.latency = cpu_to_le16(latency);
6206 ev.timeout = cpu_to_le16(timeout);
6208 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6211 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6212 u8 data_len)
6214 eir[eir_len++] = sizeof(type) + data_len;
6215 eir[eir_len++] = type;
6216 memcpy(&eir[eir_len], data, data_len);
6217 eir_len += data_len;
6219 return eir_len;
/* Emit the Device Connected event, with the remote name and class of
 * device (when known) attached as EIR fields.
 *
 * NOTE(review): the event is assembled in a fixed 512-byte stack
 * buffer; callers are presumably bounded so that sizeof(*ev) +
 * name_len + 2 + 5 fits — confirm against the callers.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device if it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6249 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6251 struct mgmt_cp_disconnect *cp = cmd->param;
6252 struct sock **sk = data;
6253 struct mgmt_rp_disconnect rp;
6255 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6256 rp.addr.type = cp->addr.type;
6258 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6259 sizeof(rp));
6261 *sk = cmd->sk;
6262 sock_hold(*sk);
6264 mgmt_pending_remove(cmd);
6267 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6269 struct hci_dev *hdev = data;
6270 struct mgmt_cp_unpair_device *cp = cmd->param;
6271 struct mgmt_rp_unpair_device rp;
6273 memset(&rp, 0, sizeof(rp));
6274 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6275 rp.addr.type = cp->addr.type;
6277 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6279 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6281 mgmt_pending_remove(cmd);
6284 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6285 u8 link_type, u8 addr_type, u8 reason,
6286 bool mgmt_connected)
6288 struct mgmt_ev_device_disconnected ev;
6289 struct pending_cmd *power_off;
6290 struct sock *sk = NULL;
6292 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6293 if (power_off) {
6294 struct mgmt_mode *cp = power_off->param;
6296 /* The connection is still in hci_conn_hash so test for 1
6297 * instead of 0 to know if this is the last one.
6299 if (!cp->val && hci_conn_count(hdev) == 1) {
6300 cancel_delayed_work(&hdev->power_off);
6301 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6305 if (!mgmt_connected)
6306 return;
6308 if (link_type != ACL_LINK && link_type != LE_LINK)
6309 return;
6311 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6313 bacpy(&ev.addr.bdaddr, bdaddr);
6314 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6315 ev.reason = reason;
6317 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6319 if (sk)
6320 sock_put(sk);
6322 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6323 hdev);
6326 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6327 u8 link_type, u8 addr_type, u8 status)
6329 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6330 struct mgmt_cp_disconnect *cp;
6331 struct mgmt_rp_disconnect rp;
6332 struct pending_cmd *cmd;
6334 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6335 hdev);
6337 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6338 if (!cmd)
6339 return;
6341 cp = cmd->param;
6343 if (bacmp(bdaddr, &cp->addr.bdaddr))
6344 return;
6346 if (cp->addr.type != bdaddr_type)
6347 return;
6349 bacpy(&rp.addr.bdaddr, bdaddr);
6350 rp.addr.type = bdaddr_type;
6352 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6353 mgmt_status(status), &rp, sizeof(rp));
6355 mgmt_pending_remove(cmd);
6358 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6359 u8 addr_type, u8 status)
6361 struct mgmt_ev_connect_failed ev;
6362 struct pending_cmd *power_off;
6364 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6365 if (power_off) {
6366 struct mgmt_mode *cp = power_off->param;
6368 /* The connection is still in hci_conn_hash so test for 1
6369 * instead of 0 to know if this is the last one.
6371 if (!cp->val && hci_conn_count(hdev) == 1) {
6372 cancel_delayed_work(&hdev->power_off);
6373 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6377 bacpy(&ev.addr.bdaddr, bdaddr);
6378 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6379 ev.status = mgmt_status(status);
6381 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6384 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6386 struct mgmt_ev_pin_code_request ev;
6388 bacpy(&ev.addr.bdaddr, bdaddr);
6389 ev.addr.type = BDADDR_BREDR;
6390 ev.secure = secure;
6392 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6395 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6396 u8 status)
6398 struct pending_cmd *cmd;
6399 struct mgmt_rp_pin_code_reply rp;
6401 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6402 if (!cmd)
6403 return;
6405 bacpy(&rp.addr.bdaddr, bdaddr);
6406 rp.addr.type = BDADDR_BREDR;
6408 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6409 mgmt_status(status), &rp, sizeof(rp));
6411 mgmt_pending_remove(cmd);
6414 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6415 u8 status)
6417 struct pending_cmd *cmd;
6418 struct mgmt_rp_pin_code_reply rp;
6420 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6421 if (!cmd)
6422 return;
6424 bacpy(&rp.addr.bdaddr, bdaddr);
6425 rp.addr.type = BDADDR_BREDR;
6427 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6428 mgmt_status(status), &rp, sizeof(rp));
6430 mgmt_pending_remove(cmd);
6433 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6434 u8 link_type, u8 addr_type, u32 value,
6435 u8 confirm_hint)
6437 struct mgmt_ev_user_confirm_request ev;
6439 BT_DBG("%s", hdev->name);
6441 bacpy(&ev.addr.bdaddr, bdaddr);
6442 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6443 ev.confirm_hint = confirm_hint;
6444 ev.value = cpu_to_le32(value);
6446 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6447 NULL);
6450 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6451 u8 link_type, u8 addr_type)
6453 struct mgmt_ev_user_passkey_request ev;
6455 BT_DBG("%s", hdev->name);
6457 bacpy(&ev.addr.bdaddr, bdaddr);
6458 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6460 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6461 NULL);
6464 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6465 u8 link_type, u8 addr_type, u8 status,
6466 u8 opcode)
6468 struct pending_cmd *cmd;
6469 struct mgmt_rp_user_confirm_reply rp;
6470 int err;
6472 cmd = mgmt_pending_find(opcode, hdev);
6473 if (!cmd)
6474 return -ENOENT;
6476 bacpy(&rp.addr.bdaddr, bdaddr);
6477 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6478 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6479 &rp, sizeof(rp));
6481 mgmt_pending_remove(cmd);
6483 return err;
6486 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6487 u8 link_type, u8 addr_type, u8 status)
6489 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6490 status, MGMT_OP_USER_CONFIRM_REPLY);
6493 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6494 u8 link_type, u8 addr_type, u8 status)
6496 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6497 status,
6498 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6501 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6502 u8 link_type, u8 addr_type, u8 status)
6504 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6505 status, MGMT_OP_USER_PASSKEY_REPLY);
6508 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6509 u8 link_type, u8 addr_type, u8 status)
6511 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6512 status,
6513 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6516 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6517 u8 link_type, u8 addr_type, u32 passkey,
6518 u8 entered)
6520 struct mgmt_ev_passkey_notify ev;
6522 BT_DBG("%s", hdev->name);
6524 bacpy(&ev.addr.bdaddr, bdaddr);
6525 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6526 ev.passkey = __cpu_to_le32(passkey);
6527 ev.entered = entered;
6529 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6532 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6533 u8 addr_type, u8 status)
6535 struct mgmt_ev_auth_failed ev;
6537 bacpy(&ev.addr.bdaddr, bdaddr);
6538 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6539 ev.status = mgmt_status(status);
6541 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6544 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6546 struct cmd_lookup match = { NULL, hdev };
6547 bool changed;
6549 if (status) {
6550 u8 mgmt_err = mgmt_status(status);
6551 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6552 cmd_status_rsp, &mgmt_err);
6553 return;
6556 if (test_bit(HCI_AUTH, &hdev->flags))
6557 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6558 &hdev->dev_flags);
6559 else
6560 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6561 &hdev->dev_flags);
6563 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6564 &match);
6566 if (changed)
6567 new_settings(hdev, match.sk);
6569 if (match.sk)
6570 sock_put(match.sk);
6573 static void clear_eir(struct hci_request *req)
6575 struct hci_dev *hdev = req->hdev;
6576 struct hci_cp_write_eir cp;
6578 if (!lmp_ext_inq_capable(hdev))
6579 return;
6581 memset(hdev->eir, 0, sizeof(hdev->eir));
6583 memset(&cp, 0, sizeof(cp));
6585 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6588 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6590 struct cmd_lookup match = { NULL, hdev };
6591 struct hci_request req;
6592 bool changed = false;
6594 if (status) {
6595 u8 mgmt_err = mgmt_status(status);
6597 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6598 &hdev->dev_flags)) {
6599 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6600 new_settings(hdev, NULL);
6603 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6604 &mgmt_err);
6605 return;
6608 if (enable) {
6609 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6610 } else {
6611 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6612 if (!changed)
6613 changed = test_and_clear_bit(HCI_HS_ENABLED,
6614 &hdev->dev_flags);
6615 else
6616 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6619 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6621 if (changed)
6622 new_settings(hdev, match.sk);
6624 if (match.sk)
6625 sock_put(match.sk);
6627 hci_req_init(&req, hdev);
6629 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6630 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6631 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6632 sizeof(enable), &enable);
6633 update_eir(&req);
6634 } else {
6635 clear_eir(&req);
6638 hci_req_run(&req, NULL);
6641 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6643 struct cmd_lookup match = { NULL, hdev };
6644 bool changed = false;
6646 if (status) {
6647 u8 mgmt_err = mgmt_status(status);
6649 if (enable) {
6650 if (test_and_clear_bit(HCI_SC_ENABLED,
6651 &hdev->dev_flags))
6652 new_settings(hdev, NULL);
6653 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6656 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6657 cmd_status_rsp, &mgmt_err);
6658 return;
6661 if (enable) {
6662 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6663 } else {
6664 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6665 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6668 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6669 settings_rsp, &match);
6671 if (changed)
6672 new_settings(hdev, match.sk);
6674 if (match.sk)
6675 sock_put(match.sk);
6678 static void sk_lookup(struct pending_cmd *cmd, void *data)
6680 struct cmd_lookup *match = data;
6682 if (match->sk == NULL) {
6683 match->sk = cmd->sk;
6684 sock_hold(match->sk);
6688 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6689 u8 status)
6691 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6693 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6694 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6695 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6697 if (!status)
6698 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6699 NULL);
6701 if (match.sk)
6702 sock_put(match.sk);
6705 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6707 struct mgmt_cp_set_local_name ev;
6708 struct pending_cmd *cmd;
6710 if (status)
6711 return;
6713 memset(&ev, 0, sizeof(ev));
6714 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6715 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6717 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6718 if (!cmd) {
6719 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6721 /* If this is a HCI command related to powering on the
6722 * HCI dev don't send any mgmt signals.
6724 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6725 return;
6728 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6729 cmd ? cmd->sk : NULL);
6732 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6733 u8 *randomizer192, u8 *hash256,
6734 u8 *randomizer256, u8 status)
6736 struct pending_cmd *cmd;
6738 BT_DBG("%s status %u", hdev->name, status);
6740 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6741 if (!cmd)
6742 return;
6744 if (status) {
6745 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6746 mgmt_status(status));
6747 } else {
6748 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6749 hash256 && randomizer256) {
6750 struct mgmt_rp_read_local_oob_ext_data rp;
6752 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6753 memcpy(rp.randomizer192, randomizer192,
6754 sizeof(rp.randomizer192));
6756 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6757 memcpy(rp.randomizer256, randomizer256,
6758 sizeof(rp.randomizer256));
6760 cmd_complete(cmd->sk, hdev->id,
6761 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6762 &rp, sizeof(rp));
6763 } else {
6764 struct mgmt_rp_read_local_oob_data rp;
6766 memcpy(rp.hash, hash192, sizeof(rp.hash));
6767 memcpy(rp.randomizer, randomizer192,
6768 sizeof(rp.randomizer));
6770 cmd_complete(cmd->sk, hdev->id,
6771 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6772 &rp, sizeof(rp));
6776 mgmt_pending_remove(cmd);
6779 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6780 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6781 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6783 char buf[512];
6784 struct mgmt_ev_device_found *ev = (void *) buf;
6785 size_t ev_size;
6787 /* Don't send events for a non-kernel initiated discovery. With
6788 * LE one exception is if we have pend_le_reports > 0 in which
6789 * case we're doing passive scanning and want these events.
6791 if (!hci_discovery_active(hdev)) {
6792 if (link_type == ACL_LINK)
6793 return;
6794 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6795 return;
6798 /* Make sure that the buffer is big enough. The 5 extra bytes
6799 * are for the potential CoD field.
6801 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6802 return;
6804 memset(buf, 0, sizeof(buf));
6806 bacpy(&ev->addr.bdaddr, bdaddr);
6807 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6808 ev->rssi = rssi;
6809 ev->flags = cpu_to_le32(flags);
6811 if (eir_len > 0)
6812 memcpy(ev->eir, eir, eir_len);
6814 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6815 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6816 dev_class, 3);
6818 if (scan_rsp_len > 0)
6819 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6821 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6822 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6824 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
6827 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6828 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6830 struct mgmt_ev_device_found *ev;
6831 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6832 u16 eir_len;
6834 ev = (struct mgmt_ev_device_found *) buf;
6836 memset(buf, 0, sizeof(buf));
6838 bacpy(&ev->addr.bdaddr, bdaddr);
6839 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6840 ev->rssi = rssi;
6842 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6843 name_len);
6845 ev->eir_len = cpu_to_le16(eir_len);
6847 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6850 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6852 struct mgmt_ev_discovering ev;
6853 struct pending_cmd *cmd;
6855 BT_DBG("%s discovering %u", hdev->name, discovering);
6857 if (discovering)
6858 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6859 else
6860 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6862 if (cmd != NULL) {
6863 u8 type = hdev->discovery.type;
6865 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6866 sizeof(type));
6867 mgmt_pending_remove(cmd);
6870 memset(&ev, 0, sizeof(ev));
6871 ev.type = hdev->discovery.type;
6872 ev.discovering = discovering;
6874 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6877 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6879 BT_DBG("%s status %u", hdev->name, status);
6882 void mgmt_reenable_advertising(struct hci_dev *hdev)
6884 struct hci_request req;
6886 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6887 return;
6889 hci_req_init(&req, hdev);
6890 enable_advertising(&req);
6891 hci_req_run(&req, adv_enable_complete);