// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google Corporation
 */

#include <linux/devcoredump.h>

#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,
	HCI_DEVCOREDUMP_PKT_SKB,
	HCI_DEVCOREDUMP_PKT_PATTERN,
	HCI_DEVCOREDUMP_PKT_COMPLETE,
	HCI_DEVCOREDUMP_PKT_ABORT,
};

struct hci_devcoredump_skb_cb {
	u16 pkt_type;
};

struct hci_devcoredump_skb_pattern {
	u8 pattern;
	u32 len;
} __packed;

#define hci_dmp_cb(skb)	((struct hci_devcoredump_skb_cb *)((skb)->cb))

#define DBG_UNEXPECTED_STATE() \
	bt_dev_dbg(hdev, \
		   "Unexpected packet (%d) for state (%d). ", \
		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)

#define MAX_DEVCOREDUMP_HDR_SIZE	512	/* bytes */
static int hci_devcd_update_hdr_state(char *buf, size_t size, int state)
{
	int len = 0;

	if (!buf)
		return 0;

	len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state);

	return len + 1; /* scnprintf adds \0 at the end upon state rewrite */
}
/* Call with hci_dev_lock only. */
static int hci_devcd_update_state(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
		   hdev->dump.state, state);

	hdev->dump.state = state;

	return hci_devcd_update_hdr_state(hdev->dump.head,
					  hdev->dump.alloc_size, state);
}
static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
{
	char dump_start[] = "--- Start dump ---\n";
	char hdr[80];
	int hdr_len;

	hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr),
					     HCI_DEVCOREDUMP_IDLE);
	skb_put_data(skb, hdr, hdr_len);

	if (hdev->dump.dmp_hdr)
		hdev->dump.dmp_hdr(hdev, skb);

	skb_put_data(skb, dump_start, strlen(dump_start));

	return skb->len;
}
/* Do not call with hci_dev_lock since this calls driver code. */
static void hci_devcd_notify(struct hci_dev *hdev, int state)
{
	if (hdev->dump.notify_change)
		hdev->dump.notify_change(hdev, state);
}
/* Call with hci_dev_lock only. */
void hci_devcd_reset(struct hci_dev *hdev)
{
	hdev->dump.head = NULL;
	hdev->dump.tail = NULL;
	hdev->dump.alloc_size = 0;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	cancel_delayed_work(&hdev->dump.dump_timeout);
	skb_queue_purge(&hdev->dump.dump_q);
}
/* Call with hci_dev_lock only. */
static void hci_devcd_free(struct hci_dev *hdev)
{
	vfree(hdev->dump.head);

	hci_devcd_reset(hdev);
}
/* Call with hci_dev_lock only. */
static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
{
	hdev->dump.head = vmalloc(size);
	if (!hdev->dump.head)
		return -ENOMEM;

	hdev->dump.alloc_size = size;
	hdev->dump.tail = hdev->dump.head;
	hdev->dump.end = hdev->dump.head + size;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	return 0;
}
/* Call with hci_dev_lock only. */
static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
{
	if (hdev->dump.tail + size > hdev->dump.end)
		return false;

	memcpy(hdev->dump.tail, buf, size);
	hdev->dump.tail += size;

	return true;
}
/* Call with hci_dev_lock only. */
static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
{
	if (hdev->dump.tail + len > hdev->dump.end)
		return false;

	memset(hdev->dump.tail, pattern, len);
	hdev->dump.tail += len;

	return true;
}
/* Call with hci_dev_lock only. */
static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;
	int dump_hdr_size;
	int err = 0;

	skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dump_hdr_size = hci_devcd_mkheader(hdev, skb);

	if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
		err = -ENOMEM;
		goto hdr_free;
	}

	/* Insert the device header */
	if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
		bt_dev_err(hdev, "Failed to insert header");
		hci_devcd_free(hdev);

		err = -ENOMEM;
		goto hdr_free;
	}

hdr_free:
	kfree_skb(skb);

	return err;
}
static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(dump_size)) {
		bt_dev_dbg(hdev, "Invalid dump init pkt");
		return;
	}

	dump_size = get_unaligned_le32(skb_pull_data(skb, 4));
	if (!dump_size) {
		bt_dev_err(hdev, "Zero size dump init pkt");
		return;
	}

	if (hci_devcd_prepare(hdev, dump_size)) {
		bt_dev_err(hdev, "Failed to prepare for dump");
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
	queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
			   hdev->dump.timeout);
}
static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (!hci_devcd_copy(hdev, skb->data, skb->len))
		bt_dev_dbg(hdev, "Failed to insert skb");
}
static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_devcoredump_skb_pattern *pattern;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(*pattern)) {
		bt_dev_dbg(hdev, "Invalid pattern skb");
		return;
	}

	pattern = skb_pull_data(skb, sizeof(*pattern));

	if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
		bt_dev_dbg(hdev, "Failed to set pattern");
}
static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}
static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}
/* Bluetooth devcoredump state machine.
 *
 * Devcoredump states:
 *
 *	HCI_DEVCOREDUMP_IDLE: The default state.
 *
 *	HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
 *		been initialized using hci_devcd_init(). Once active, the driver
 *		can append data using hci_devcd_append() or insert a pattern
 *		using hci_devcd_append_pattern().
 *
 *	HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
 *		can signal the completion using hci_devcd_complete(). A
 *		devcoredump is generated indicating the completion event and
 *		then the state machine is reset to the default state.
 *
 *	HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
 *		case of any error using hci_devcd_abort(). A devcoredump is
 *		still generated with the available data indicating the abort
 *		event and then the state machine is reset to the default state.
 *
 *	HCI_DEVCOREDUMP_TIMEOUT: A timeout timer (hdev->dump.timeout, set to
 *		DEVCOREDUMP_TIMEOUT at registration) is started during
 *		devcoredump initialization. Once the timeout occurs, the driver
 *		is notified, a devcoredump is generated with the available data
 *		indicating the timeout event and then the state machine is
 *		reset to the default state.
 *
 * The driver must register using hci_devcd_register() before using the hci
 * devcoredump APIs.
 */
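/* Illustrative call sequence for a driver (a sketch only; the foo_* callback
 * names are hypothetical and stand in for a real vendor driver):
 *
 *	hci_devcd_register(hdev, foo_coredump, foo_dmp_hdr, foo_notify);
 *	...
 *	hci_devcd_init(hdev, dump_size);		IDLE -> ACTIVE
 *	hci_devcd_append(hdev, skb);			copy data into the dump
 *	hci_devcd_append_pattern(hdev, 0x00, len);	zero-fill a region
 *	hci_devcd_complete(hdev);			ACTIVE -> DONE, emit dump
 *
 * or, on error, hci_devcd_abort() to emit whatever was collected so far.
 */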
void hci_devcd_rx(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
	struct sk_buff *skb;
	int start_state;

	while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
		/* Return if timeout occurs. The timeout handler function
		 * hci_devcd_timeout() will report the available dump data.
		 */
		if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
			kfree_skb(skb);
			return;
		}

		hci_dev_lock(hdev);
		start_state = hdev->dump.state;

		switch (hci_dmp_cb(skb)->pkt_type) {
		case HCI_DEVCOREDUMP_PKT_INIT:
			hci_devcd_handle_pkt_init(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_SKB:
			hci_devcd_handle_pkt_skb(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_PATTERN:
			hci_devcd_handle_pkt_pattern(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_COMPLETE:
			hci_devcd_handle_pkt_complete(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_ABORT:
			hci_devcd_handle_pkt_abort(hdev, skb);
			break;

		default:
			bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ",
				   hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
			break;
		}

		hci_dev_unlock(hdev);
		kfree_skb(skb);

		/* Notify the driver about any state changes before resetting
		 * the state machine
		 */
		if (start_state != hdev->dump.state)
			hci_devcd_notify(hdev, hdev->dump.state);

		/* Reset the state machine if the devcoredump is complete */
		hci_dev_lock(hdev);
		if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
		    hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
			hci_devcd_reset(hdev);
		hci_dev_unlock(hdev);
	}
}
EXPORT_SYMBOL(hci_devcd_rx);
void hci_devcd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    dump.dump_timeout.work);
	u32 dump_size;

	hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	hci_dev_lock(hdev);

	cancel_work(&hdev->dump.dump_rx);

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);

	hci_devcd_reset(hdev);

	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(hci_devcd_timeout);
int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
		       dmp_hdr_t dmp_hdr, notify_change_t notify_change)
{
	/* Driver must implement coredump() and dmp_hdr() functions for
	 * bluetooth devcoredump. The coredump() should trigger a coredump
	 * event on the controller when the device's coredump sysfs entry is
	 * written to. The dmp_hdr() should create a dump header to identify
	 * the controller/fw/driver info.
	 */
	if (!coredump || !dmp_hdr)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->dump.coredump = coredump;
	hdev->dump.dmp_hdr = dmp_hdr;
	hdev->dump.notify_change = notify_change;
	hdev->dump.supported = true;
	hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
	hci_dev_unlock(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_register);
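/* A minimal registration sketch (illustration only; the foo_* names are
 * hypothetical, and the callback shapes below assume the coredump_t and
 * dmp_hdr_t typedefs from hci_core.h). dmp_hdr() appends identifying text to
 * the header skb; coredump() asks the controller to start streaming dump data:
 *
 *	static void foo_coredump(struct hci_dev *hdev)
 *	{
 *		foo_send_dump_cmd(hdev);	<- hypothetical vendor command
 *	}
 *
 *	static void foo_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
 *	{
 *		char buf[64];
 *		int len;
 *
 *		len = snprintf(buf, sizeof(buf), "Controller Name: foo\n");
 *		skb_put_data(skb, buf, len);
 *	}
 *
 *	err = hci_devcd_register(hdev, foo_coredump, foo_dmp_hdr, NULL);
 *
 * notify_change may be NULL, since this file checks it before calling.
 */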
static inline bool hci_devcd_enabled(struct hci_dev *hdev)
{
	return hdev->dump.supported;
}
int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT;
	put_unaligned_le32(dump_size, skb_put(skb, 4));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_init);
int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!skb)
		return -ENOMEM;

	if (!hci_devcd_enabled(hdev)) {
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append);
int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
{
	struct hci_devcoredump_skb_pattern p;
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(p), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	p.pattern = pattern;
	p.len = len;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN;
	skb_put_data(skb, &p, sizeof(p));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append_pattern);
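/* For example, hci_devcd_append_pattern(hdev, 0x00, 1024) queues a request to
 * zero-fill the next 1024 bytes of the dump buffer, so a large padded region
 * need not be transferred byte by byte (the values here are illustrative).
 */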
int hci_devcd_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_complete);
int hci_devcd_abort(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_abort);