// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google Corporation
 */
6 #include <linux/devcoredump.h>
8 #include <asm/unaligned.h>
9 #include <net/bluetooth/bluetooth.h>
10 #include <net/bluetooth/hci_core.h>
/* Packet types queued on dump_q; dispatched by hci_devcd_rx(). */
enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,	/* start a new dump of a given size */
	HCI_DEVCOREDUMP_PKT_SKB,	/* raw data to append to the dump */
	HCI_DEVCOREDUMP_PKT_PATTERN,	/* fill a region with a byte pattern */
	HCI_DEVCOREDUMP_PKT_COMPLETE,	/* driver signalled dump completion */
	HCI_DEVCOREDUMP_PKT_ABORT,	/* driver cancelled the dump */
};
20 struct hci_devcoredump_skb_cb
{
24 struct hci_devcoredump_skb_pattern
{
/* Access the devcoredump control block stored in an skb's cb area */
#define hci_dmp_cb(skb)	((struct hci_devcoredump_skb_cb *)((skb)->cb))

/* Log a packet that arrived while the state machine cannot accept it.
 * Expects `hdev` and `skb` to be in scope at the expansion site.
 */
#define DBG_UNEXPECTED_STATE() \
	bt_dev_dbg(hdev, \
		   "Unexpected packet (%d) for state (%d). ", \
		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)

#define MAX_DEVCOREDUMP_HDR_SIZE	512	/* bytes */
38 static int hci_devcd_update_hdr_state(char *buf
, size_t size
, int state
)
45 len
= scnprintf(buf
, size
, "Bluetooth devcoredump\nState: %d\n", state
);
47 return len
+ 1; /* scnprintf adds \0 at the end upon state rewrite */
50 /* Call with hci_dev_lock only. */
51 static int hci_devcd_update_state(struct hci_dev
*hdev
, int state
)
53 bt_dev_dbg(hdev
, "Updating devcoredump state from %d to %d.",
54 hdev
->dump
.state
, state
);
56 hdev
->dump
.state
= state
;
58 return hci_devcd_update_hdr_state(hdev
->dump
.head
,
59 hdev
->dump
.alloc_size
, state
);
62 static int hci_devcd_mkheader(struct hci_dev
*hdev
, struct sk_buff
*skb
)
64 char dump_start
[] = "--- Start dump ---\n";
68 hdr_len
= hci_devcd_update_hdr_state(hdr
, sizeof(hdr
),
69 HCI_DEVCOREDUMP_IDLE
);
70 skb_put_data(skb
, hdr
, hdr_len
);
72 if (hdev
->dump
.dmp_hdr
)
73 hdev
->dump
.dmp_hdr(hdev
, skb
);
75 skb_put_data(skb
, dump_start
, strlen(dump_start
));
80 /* Do not call with hci_dev_lock since this calls driver code. */
81 static void hci_devcd_notify(struct hci_dev
*hdev
, int state
)
83 if (hdev
->dump
.notify_change
)
84 hdev
->dump
.notify_change(hdev
, state
);
87 /* Call with hci_dev_lock only. */
88 void hci_devcd_reset(struct hci_dev
*hdev
)
90 hdev
->dump
.head
= NULL
;
91 hdev
->dump
.tail
= NULL
;
92 hdev
->dump
.alloc_size
= 0;
94 hci_devcd_update_state(hdev
, HCI_DEVCOREDUMP_IDLE
);
96 cancel_delayed_work(&hdev
->dump
.dump_timeout
);
97 skb_queue_purge(&hdev
->dump
.dump_q
);
100 /* Call with hci_dev_lock only. */
101 static void hci_devcd_free(struct hci_dev
*hdev
)
103 vfree(hdev
->dump
.head
);
105 hci_devcd_reset(hdev
);
108 /* Call with hci_dev_lock only. */
109 static int hci_devcd_alloc(struct hci_dev
*hdev
, u32 size
)
111 hdev
->dump
.head
= vmalloc(size
);
112 if (!hdev
->dump
.head
)
115 hdev
->dump
.alloc_size
= size
;
116 hdev
->dump
.tail
= hdev
->dump
.head
;
117 hdev
->dump
.end
= hdev
->dump
.head
+ size
;
119 hci_devcd_update_state(hdev
, HCI_DEVCOREDUMP_IDLE
);
124 /* Call with hci_dev_lock only. */
125 static bool hci_devcd_copy(struct hci_dev
*hdev
, char *buf
, u32 size
)
127 if (hdev
->dump
.tail
+ size
> hdev
->dump
.end
)
130 memcpy(hdev
->dump
.tail
, buf
, size
);
131 hdev
->dump
.tail
+= size
;
136 /* Call with hci_dev_lock only. */
137 static bool hci_devcd_memset(struct hci_dev
*hdev
, u8 pattern
, u32 len
)
139 if (hdev
->dump
.tail
+ len
> hdev
->dump
.end
)
142 memset(hdev
->dump
.tail
, pattern
, len
);
143 hdev
->dump
.tail
+= len
;
148 /* Call with hci_dev_lock only. */
149 static int hci_devcd_prepare(struct hci_dev
*hdev
, u32 dump_size
)
155 skb
= alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE
, GFP_ATOMIC
);
159 dump_hdr_size
= hci_devcd_mkheader(hdev
, skb
);
161 if (hci_devcd_alloc(hdev
, dump_hdr_size
+ dump_size
)) {
166 /* Insert the device header */
167 if (!hci_devcd_copy(hdev
, skb
->data
, skb
->len
)) {
168 bt_dev_err(hdev
, "Failed to insert header");
169 hci_devcd_free(hdev
);
181 static void hci_devcd_handle_pkt_init(struct hci_dev
*hdev
, struct sk_buff
*skb
)
185 if (hdev
->dump
.state
!= HCI_DEVCOREDUMP_IDLE
) {
186 DBG_UNEXPECTED_STATE();
190 if (skb
->len
!= sizeof(dump_size
)) {
191 bt_dev_dbg(hdev
, "Invalid dump init pkt");
195 dump_size
= get_unaligned_le32(skb_pull_data(skb
, 4));
197 bt_dev_err(hdev
, "Zero size dump init pkt");
201 if (hci_devcd_prepare(hdev
, dump_size
)) {
202 bt_dev_err(hdev
, "Failed to prepare for dump");
206 hci_devcd_update_state(hdev
, HCI_DEVCOREDUMP_ACTIVE
);
207 queue_delayed_work(hdev
->workqueue
, &hdev
->dump
.dump_timeout
,
211 static void hci_devcd_handle_pkt_skb(struct hci_dev
*hdev
, struct sk_buff
*skb
)
213 if (hdev
->dump
.state
!= HCI_DEVCOREDUMP_ACTIVE
) {
214 DBG_UNEXPECTED_STATE();
218 if (!hci_devcd_copy(hdev
, skb
->data
, skb
->len
))
219 bt_dev_dbg(hdev
, "Failed to insert skb");
222 static void hci_devcd_handle_pkt_pattern(struct hci_dev
*hdev
,
225 struct hci_devcoredump_skb_pattern
*pattern
;
227 if (hdev
->dump
.state
!= HCI_DEVCOREDUMP_ACTIVE
) {
228 DBG_UNEXPECTED_STATE();
232 if (skb
->len
!= sizeof(*pattern
)) {
233 bt_dev_dbg(hdev
, "Invalid pattern skb");
237 pattern
= skb_pull_data(skb
, sizeof(*pattern
));
239 if (!hci_devcd_memset(hdev
, pattern
->pattern
, pattern
->len
))
240 bt_dev_dbg(hdev
, "Failed to set pattern");
243 static void hci_devcd_handle_pkt_complete(struct hci_dev
*hdev
,
248 if (hdev
->dump
.state
!= HCI_DEVCOREDUMP_ACTIVE
) {
249 DBG_UNEXPECTED_STATE();
253 hci_devcd_update_state(hdev
, HCI_DEVCOREDUMP_DONE
);
254 dump_size
= hdev
->dump
.tail
- hdev
->dump
.head
;
256 bt_dev_dbg(hdev
, "complete with size %u (expect %zu)", dump_size
,
257 hdev
->dump
.alloc_size
);
259 dev_coredumpv(&hdev
->dev
, hdev
->dump
.head
, dump_size
, GFP_KERNEL
);
262 static void hci_devcd_handle_pkt_abort(struct hci_dev
*hdev
,
267 if (hdev
->dump
.state
!= HCI_DEVCOREDUMP_ACTIVE
) {
268 DBG_UNEXPECTED_STATE();
272 hci_devcd_update_state(hdev
, HCI_DEVCOREDUMP_ABORT
);
273 dump_size
= hdev
->dump
.tail
- hdev
->dump
.head
;
275 bt_dev_dbg(hdev
, "aborted with size %u (expect %zu)", dump_size
,
276 hdev
->dump
.alloc_size
);
278 /* Emit a devcoredump with the available data */
279 dev_coredumpv(&hdev
->dev
, hdev
->dump
.head
, dump_size
, GFP_KERNEL
);
/* Bluetooth devcoredump state machine.
 *
 * Devcoredump states:
 *
 *      HCI_DEVCOREDUMP_IDLE: The default state.
 *
 *      HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
 *              been initialized using hci_devcd_init(). Once active, the driver
 *              can append data using hci_devcd_append() or insert a pattern
 *              using hci_devcd_append_pattern().
 *
 *      HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
 *              can signal the completion using hci_devcd_complete(). A
 *              devcoredump is generated indicating the completion event and
 *              then the state machine is reset to the default state.
 *
 *      HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
 *              case of any error using hci_devcd_abort(). A devcoredump is
 *              still generated with the available data indicating the abort
 *              event and then the state machine is reset to the default state.
 *
 *      HCI_DEVCOREDUMP_TIMEOUT: A timeout timer for HCI_DEVCOREDUMP_TIMEOUT sec
 *              is started during devcoredump initialization. Once the timeout
 *              occurs, the driver is notified, a devcoredump is generated with
 *              the available data indicating the timeout event and then the
 *              state machine is reset to the default state.
 *
 * The driver must register using hci_devcd_register() before using the hci
 * devcoredump APIs.
 */
312 void hci_devcd_rx(struct work_struct
*work
)
314 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, dump
.dump_rx
);
318 while ((skb
= skb_dequeue(&hdev
->dump
.dump_q
))) {
319 /* Return if timeout occurs. The timeout handler function
320 * hci_devcd_timeout() will report the available dump data.
322 if (hdev
->dump
.state
== HCI_DEVCOREDUMP_TIMEOUT
) {
328 start_state
= hdev
->dump
.state
;
330 switch (hci_dmp_cb(skb
)->pkt_type
) {
331 case HCI_DEVCOREDUMP_PKT_INIT
:
332 hci_devcd_handle_pkt_init(hdev
, skb
);
335 case HCI_DEVCOREDUMP_PKT_SKB
:
336 hci_devcd_handle_pkt_skb(hdev
, skb
);
339 case HCI_DEVCOREDUMP_PKT_PATTERN
:
340 hci_devcd_handle_pkt_pattern(hdev
, skb
);
343 case HCI_DEVCOREDUMP_PKT_COMPLETE
:
344 hci_devcd_handle_pkt_complete(hdev
, skb
);
347 case HCI_DEVCOREDUMP_PKT_ABORT
:
348 hci_devcd_handle_pkt_abort(hdev
, skb
);
352 bt_dev_dbg(hdev
, "Unknown packet (%d) for state (%d). ",
353 hci_dmp_cb(skb
)->pkt_type
, hdev
->dump
.state
);
357 hci_dev_unlock(hdev
);
360 /* Notify the driver about any state changes before resetting
363 if (start_state
!= hdev
->dump
.state
)
364 hci_devcd_notify(hdev
, hdev
->dump
.state
);
366 /* Reset the state machine if the devcoredump is complete */
368 if (hdev
->dump
.state
== HCI_DEVCOREDUMP_DONE
||
369 hdev
->dump
.state
== HCI_DEVCOREDUMP_ABORT
)
370 hci_devcd_reset(hdev
);
371 hci_dev_unlock(hdev
);
374 EXPORT_SYMBOL(hci_devcd_rx
);
376 void hci_devcd_timeout(struct work_struct
*work
)
378 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
379 dump
.dump_timeout
.work
);
382 hci_devcd_notify(hdev
, HCI_DEVCOREDUMP_TIMEOUT
);
386 cancel_work(&hdev
->dump
.dump_rx
);
388 hci_devcd_update_state(hdev
, HCI_DEVCOREDUMP_TIMEOUT
);
390 dump_size
= hdev
->dump
.tail
- hdev
->dump
.head
;
391 bt_dev_dbg(hdev
, "timeout with size %u (expect %zu)", dump_size
,
392 hdev
->dump
.alloc_size
);
394 /* Emit a devcoredump with the available data */
395 dev_coredumpv(&hdev
->dev
, hdev
->dump
.head
, dump_size
, GFP_KERNEL
);
397 hci_devcd_reset(hdev
);
399 hci_dev_unlock(hdev
);
401 EXPORT_SYMBOL(hci_devcd_timeout
);
403 int hci_devcd_register(struct hci_dev
*hdev
, coredump_t coredump
,
404 dmp_hdr_t dmp_hdr
, notify_change_t notify_change
)
406 /* Driver must implement coredump() and dmp_hdr() functions for
407 * bluetooth devcoredump. The coredump() should trigger a coredump
408 * event on the controller when the device's coredump sysfs entry is
409 * written to. The dmp_hdr() should create a dump header to identify
410 * the controller/fw/driver info.
412 if (!coredump
|| !dmp_hdr
)
416 hdev
->dump
.coredump
= coredump
;
417 hdev
->dump
.dmp_hdr
= dmp_hdr
;
418 hdev
->dump
.notify_change
= notify_change
;
419 hdev
->dump
.supported
= true;
420 hdev
->dump
.timeout
= DEVCOREDUMP_TIMEOUT
;
421 hci_dev_unlock(hdev
);
425 EXPORT_SYMBOL(hci_devcd_register
);
427 static inline bool hci_devcd_enabled(struct hci_dev
*hdev
)
429 return hdev
->dump
.supported
;
432 int hci_devcd_init(struct hci_dev
*hdev
, u32 dump_size
)
436 if (!hci_devcd_enabled(hdev
))
439 skb
= alloc_skb(sizeof(dump_size
), GFP_ATOMIC
);
443 hci_dmp_cb(skb
)->pkt_type
= HCI_DEVCOREDUMP_PKT_INIT
;
444 put_unaligned_le32(dump_size
, skb_put(skb
, 4));
446 skb_queue_tail(&hdev
->dump
.dump_q
, skb
);
447 queue_work(hdev
->workqueue
, &hdev
->dump
.dump_rx
);
451 EXPORT_SYMBOL(hci_devcd_init
);
453 int hci_devcd_append(struct hci_dev
*hdev
, struct sk_buff
*skb
)
458 if (!hci_devcd_enabled(hdev
)) {
463 hci_dmp_cb(skb
)->pkt_type
= HCI_DEVCOREDUMP_PKT_SKB
;
465 skb_queue_tail(&hdev
->dump
.dump_q
, skb
);
466 queue_work(hdev
->workqueue
, &hdev
->dump
.dump_rx
);
470 EXPORT_SYMBOL(hci_devcd_append
);
472 int hci_devcd_append_pattern(struct hci_dev
*hdev
, u8 pattern
, u32 len
)
474 struct hci_devcoredump_skb_pattern p
;
477 if (!hci_devcd_enabled(hdev
))
480 skb
= alloc_skb(sizeof(p
), GFP_ATOMIC
);
487 hci_dmp_cb(skb
)->pkt_type
= HCI_DEVCOREDUMP_PKT_PATTERN
;
488 skb_put_data(skb
, &p
, sizeof(p
));
490 skb_queue_tail(&hdev
->dump
.dump_q
, skb
);
491 queue_work(hdev
->workqueue
, &hdev
->dump
.dump_rx
);
495 EXPORT_SYMBOL(hci_devcd_append_pattern
);
497 int hci_devcd_complete(struct hci_dev
*hdev
)
501 if (!hci_devcd_enabled(hdev
))
504 skb
= alloc_skb(0, GFP_ATOMIC
);
508 hci_dmp_cb(skb
)->pkt_type
= HCI_DEVCOREDUMP_PKT_COMPLETE
;
510 skb_queue_tail(&hdev
->dump
.dump_q
, skb
);
511 queue_work(hdev
->workqueue
, &hdev
->dump
.dump_rx
);
515 EXPORT_SYMBOL(hci_devcd_complete
);
517 int hci_devcd_abort(struct hci_dev
*hdev
)
521 if (!hci_devcd_enabled(hdev
))
524 skb
= alloc_skb(0, GFP_ATOMIC
);
528 hci_dmp_cb(skb
)->pkt_type
= HCI_DEVCOREDUMP_PKT_ABORT
;
530 skb_queue_tail(&hdev
->dump
.dump_q
, skb
);
531 queue_work(hdev
->workqueue
, &hdev
->dump
.dump_rx
);
535 EXPORT_SYMBOL(hci_devcd_abort
);