2 * Generic Virtual-Device Fuzzing Target
4 * Copyright Red Hat Inc., 2020
7 * Alexander Bulekov <alxndr@bu.edu>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
13 #include "qemu/osdep.h"
17 #include "hw/core/cpu.h"
18 #include "tests/qtest/libqos/libqtest.h"
19 #include "tests/qtest/libqos/pci-pc.h"
21 #include "fork_fuzz.h"
23 #include "exec/memory.h"
24 #include "exec/ramblock.h"
25 #include "hw/qdev-core.h"
26 #include "hw/pci/pci.h"
27 #include "hw/boards.h"
28 #include "generic_fuzz_configs.h"
29 #include "hw/mem/sparse-mem.h"
32 * SEPARATOR is used to separate "operations" in the fuzz input
34 #define SEPARATOR "FUZZ"
45 OP_CLEAR_DMA_PATTERNS
,
/* Limits and constants for the generic fuzzer. */
#define DEFAULT_TIMEOUT_US 100000
/*
 * Microseconds per second.  This was previously defined as 1000000000
 * (nanoseconds per second): since generic_fuzz() computes
 * timer.it_value.tv_sec = timeout / USEC_IN_SEC and
 * timer.it_value.tv_usec = timeout % USEC_IN_SEC, the old value made
 * tv_sec always 0 and allowed tv_usec to exceed the valid range
 * (tv_usec must be < 1000000), so setitimer() would fail with EINVAL
 * for any QEMU_FUZZ_TIMEOUT of one second or more.
 */
#define USEC_IN_SEC 1000000
/* Cap the number of bytes a single DMA fill may write */
#define MAX_DMA_FILL_SIZE 0x10000
/* Standard x86 PCI host-bridge configuration-space access ports */
#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc
/*
 * Global fuzzer state: timeout, logging flag, the sparse-memory backing
 * region for DMA, the DMA fill pattern type, and the tables of fuzzable
 * MemoryRegions / PCI devices discovered at pre-fuzz time.
 * NOTE(review): this extract is missing interleaved source lines (e.g. the
 * struct headers for address_range and pattern); code kept byte-identical.
 */
59 ram_addr_t size
; /* The number of bytes until the end of the I/O region */
62 static useconds_t timeout
= DEFAULT_TIMEOUT_US
;
64 static bool qtest_log_enabled
;
66 MemoryRegion
*sparse_mem_mr
;
69 * A pattern used to populate a DMA region or perform a memwrite. This is
70 * useful for e.g. populating tables of unique addresses.
71 * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
72 * Renders as: 00 01 02 00 03 02 00 05 02 00 07 02 ...
75 uint8_t index
; /* Index of a byte to increment by stride */
76 uint8_t stride
; /* Increment each index'th byte by this amount */
81 /* Avoid filling the same DMA region between MMIO/PIO commands ? */
82 static bool avoid_double_fetches
;
84 static QTestState
*qts_global
; /* Need a global for the DMA callback */
87 * List of memory regions that are children of QOM objects specified by the
90 static GHashTable
*fuzzable_memoryregions
;
91 static GPtrArray
*fuzzable_pci_devices
;
/*
 * FlatView iteration callback used by get_io_address(): when the "index"-th
 * fuzzable MemoryRegion is reached (counted down via info->index), record its
 * absolute start address and size into info->result.
 * NOTE(review): this extract is missing interleaved source lines (struct
 * fields, trailing parameters and the tail of the callback); code kept
 * byte-identical — do not edit blindly.
 */
93 struct get_io_cb_info
{
99 static bool get_io_address_cb(Int128 start
, Int128 size
,
100 const MemoryRegion
*mr
,
101 hwaddr offset_in_region
,
104 struct get_io_cb_info
*info
= opaque
;
105 if (g_hash_table_lookup(fuzzable_memoryregions
, mr
)) {
106 if (info
->index
== 0) {
107 info
->result
.addr
= (ram_addr_t
)start
;
108 info
->result
.size
= (ram_addr_t
)size
;
/*
 * Per-iteration DMA bookkeeping: regions already filled (reset each command
 * loop in generic_fuzz), the list of user-supplied fill patterns, the
 * round-robin index into that list, and the PCI kill-switch flag.
 */
118 * List of dma regions populated since the last fuzzing command. Used to ensure
119 * that we only write to each DMA address once, to avoid race conditions when
120 * building reproducers.
122 static GArray
*dma_regions
;
124 static GArray
*dma_patterns
;
125 static int dma_pattern_index
;
126 static bool pci_disabled
;
/*
 * Allocate a len-byte buffer and fill it by tiling p.data (p.len bytes),
 * bumping the p.index-th byte of each repetition by p.stride so successive
 * copies differ (useful for tables of unique addresses).
 * Caller owns the returned g_malloc'd buffer.
 * NOTE(review): extraction is missing the loop tail / return statement;
 * code kept byte-identical.
 */
129 * Allocate a block of memory and populate it with a pattern.
131 static void *pattern_alloc(pattern p
, size_t len
)
134 uint8_t *buf
= g_malloc(len
);
137 for (i
= 0; i
< len
; ++i
) {
138 buf
[i
] = p
.data
[i
% p
.len
];
139 if ((i
% p
.len
) == p
.index
) {
/*
 * Clamp an access length l against mr's maximum valid access size and, for
 * regions that do not support unaligned accesses, against the natural
 * alignment of addr (addr & -addr isolates the lowest set bit).
 * NOTE(review): extraction is missing several lines (the default-size branch
 * and the final return); code kept byte-identical.
 */
147 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
149 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
152 * Regions are assumed to support 1-4 byte accesses unless
153 * otherwise specified.
155 if (access_size_max
== 0) {
159 /* Bound the maximum access by the alignment of the address. */
160 if (!mr
->ops
->impl
.unaligned
) {
161 unsigned align_size_max
= addr
& -addr
;
162 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
163 access_size_max
= align_size_max
;
167 /* Don't attempt accesses larger than the maximum. */
168 if (l
> access_size_max
) {
/*
 * DMA-read hook: when the device model reads guest memory, fill that range
 * with the current pattern first.  Overlaps with already-filled ranges are
 * split (recursively) so each address is written at most once per command,
 * the fill length is capped at MAX_DMA_FILL_SIZE, and with QTEST_LOG set the
 * write is logged with [DMA] / [DOUBLE-FETCH] tags for reproducer reordering.
 * NOTE(review): extraction is missing many interleaved lines (early-return
 * guards, loop headers, the buffer-advance/free tail); code kept
 * byte-identical — do not edit blindly.
 */
177 * Call-back for functions that perform DMA reads from guest memory. Confirm
178 * that the region has not already been populated since the last loop in
179 * generic_fuzz(), avoiding potential race-conditions, which we don't have
180 * a good way for reproducing right now.
182 void fuzz_dma_read_cb(size_t addr
, size_t len
, MemoryRegion
*mr
)
184 /* Are we in the generic-fuzzer or are we using another fuzz-target? */
190 * Return immediately if:
191 * - We have no DMA patterns defined
192 * - The length of the DMA read request is zero
193 * - The DMA read is hitting an MR other than the machine's main RAM
194 * - The DMA request hits past the bounds of our RAM
196 if (dma_patterns
->len
== 0
198 || (mr
!= current_machine
->ram
&& mr
!= sparse_mem_mr
)) {
203 * If we overlap with any existing dma_regions, split the range and only
204 * populate the non-overlapping parts.
206 address_range region
;
207 bool double_fetch
= false;
209 i
< dma_regions
->len
&& (avoid_double_fetches
|| qtest_log_enabled
);
211 region
= g_array_index(dma_regions
, address_range
, i
);
212 if (addr
< region
.addr
+ region
.size
&& addr
+ len
> region
.addr
) {
214 if (addr
< region
.addr
215 && avoid_double_fetches
) {
216 fuzz_dma_read_cb(addr
, region
.addr
- addr
, mr
);
218 if (addr
+ len
> region
.addr
+ region
.size
219 && avoid_double_fetches
) {
220 fuzz_dma_read_cb(region
.addr
+ region
.size
,
221 addr
+ len
- (region
.addr
+ region
.size
), mr
);
227 /* Cap the length of the DMA access to something reasonable */
228 len
= MIN(len
, MAX_DMA_FILL_SIZE
);
230 address_range ar
= {addr
, len
};
231 g_array_append_val(dma_regions
, ar
);
232 pattern p
= g_array_index(dma_patterns
, pattern
, dma_pattern_index
);
233 void *buf_base
= pattern_alloc(p
, ar
.size
);
234 void *buf
= buf_base
;
239 mr1
= address_space_translate(first_cpu
->as
,
240 addr
, &addr1
, &l
, true,
241 MEMTXATTRS_UNSPECIFIED
);
243 if (!(memory_region_is_ram(mr1
) ||
244 memory_region_is_romd(mr1
)) && mr1
!= sparse_mem_mr
) {
245 l
= memory_access_size(mr1
, l
, addr1
);
248 if (qtest_log_enabled
) {
250 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the log
251 * that will be written by qtest.c with a DMA tag, so we can reorder
252 * the resulting QTest trace so the DMA fills precede the last PIO/MMIO
255 fprintf(stderr
, "[DMA] ");
257 fprintf(stderr
, "[DOUBLE-FETCH] ");
261 qtest_memwrite(qts_global
, addr
, buf
, l
);
270 /* Increment the index of the pattern for the next DMA access */
271 dma_pattern_index
= (dma_pattern_index
+ 1) % dma_patterns
->len
;
/*
 * Resolve a fuzzer-provided [index, offset] into an absolute address inside
 * the index-th fuzzable MemoryRegion of address space "as", via repeated
 * FlatView walks with get_io_address_cb.  On success, *result holds the
 * offset-adjusted address and the remaining bytes to the region's end.
 * Returns whether a matching region was found.
 * NOTE(review): extraction is missing interleaved lines (parameter list tail,
 * loop header, found-check guard); code kept byte-identical.
 */
275 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
276 * a physical address. To do this, we iterate over all of the matched
277 * MemoryRegions. Check whether each region exists within the particular io
278 * space. Return the absolute address of the offset within the index'th region
279 * that is a subregion of the io_space and the distance until the end of the
282 static bool get_io_address(address_range
*result
, AddressSpace
*as
,
286 view
= as
->current_map
;
288 struct get_io_cb_info cb_info
= {};
290 cb_info
.index
= index
;
293 * Loop around the FlatView until we match "index" number of
294 * fuzzable_memoryregions, or until we know that there are no matching
298 flatview_for_each_range(view
, get_io_address_cb
, &cb_info
);
299 } while (cb_info
.index
!= index
&& !cb_info
.found
);
301 *result
= cb_info
.result
;
303 offset
= offset
% result
->size
;
304 result
->addr
+= offset
;
305 result
->size
-= offset
;
307 return cb_info
.found
;
310 static bool get_pio_address(address_range
*result
,
311 uint8_t index
, uint16_t offset
)
314 * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
315 * can contain an addr that extends past the PIO space. When we pass this
316 * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
317 * up fuzzing a completely different MemoryRegion/Device. Therefore, check
318 * that the address here is within the PIO space limits.
320 bool found
= get_io_address(result
, &address_space_io
, index
, offset
);
321 return result
->addr
<= 0xFFFF ? found
: false;
/*
 * MMIO counterpart of get_pio_address(): resolve [index, offset] within the
 * system memory address space.  No upper-bound check is needed here since
 * MMIO addresses are not truncated.
 */
324 static bool get_mmio_address(address_range
*result
,
325 uint8_t index
, uint32_t offset
)
327 return get_io_address(result
, &address_space_memory
, index
, offset
);
/*
 * Fuzz op: port-input.  Decode a packed {base, offset, size} struct from the
 * raw command bytes, resolve a PIO address, and issue inb/inw/inl according
 * to size % end_sizes.
 * NOTE(review): extraction is missing the operand-struct definition, case
 * labels and breaks; code kept byte-identical.
 */
330 static void op_in(QTestState
*s
, const unsigned char * data
, size_t len
)
332 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
340 if (len
< sizeof(a
)) {
343 memcpy(&a
, data
, sizeof(a
));
344 if (get_pio_address(&abs
, a
.base
, a
.offset
) == 0) {
348 switch (a
.size
%= end_sizes
) {
350 qtest_inb(s
, abs
.addr
);
354 qtest_inw(s
, abs
.addr
);
359 qtest_inl(s
, abs
.addr
);
/*
 * Fuzz op: port-output.  Like op_in but writes a.value (masked to the access
 * width) via outb/outw/outl.
 * NOTE(review): extraction is missing the operand-struct definition, case
 * labels and breaks; code kept byte-identical.
 */
365 static void op_out(QTestState
*s
, const unsigned char * data
, size_t len
)
367 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
376 if (len
< sizeof(a
)) {
379 memcpy(&a
, data
, sizeof(a
));
381 if (get_pio_address(&abs
, a
.base
, a
.offset
) == 0) {
385 switch (a
.size
%= end_sizes
) {
387 qtest_outb(s
, abs
.addr
, a
.value
& 0xFF);
391 qtest_outw(s
, abs
.addr
, a
.value
& 0xFFFF);
396 qtest_outl(s
, abs
.addr
, a
.value
);
/*
 * Fuzz op: MMIO read.  Resolve an MMIO address and issue a 1/2/4/8-byte read
 * according to size % end_sizes (note Quad is included here, unlike PIO).
 * NOTE(review): extraction is missing the operand-struct definition, case
 * labels and breaks; code kept byte-identical.
 */
402 static void op_read(QTestState
*s
, const unsigned char * data
, size_t len
)
404 enum Sizes
{Byte
, Word
, Long
, Quad
, end_sizes
};
412 if (len
< sizeof(a
)) {
415 memcpy(&a
, data
, sizeof(a
));
417 if (get_mmio_address(&abs
, a
.base
, a
.offset
) == 0) {
421 switch (a
.size
%= end_sizes
) {
423 qtest_readb(s
, abs
.addr
);
427 qtest_readw(s
, abs
.addr
);
432 qtest_readl(s
, abs
.addr
);
437 qtest_readq(s
, abs
.addr
);
/*
 * Fuzz op: MMIO write.  Resolve an MMIO address and write a.value with a
 * 1/2/4/8-byte access, masking the value to the access width.
 * NOTE(review): extraction is missing the operand-struct definition, case
 * labels and breaks; code kept byte-identical.
 */
443 static void op_write(QTestState
*s
, const unsigned char * data
, size_t len
)
445 enum Sizes
{Byte
, Word
, Long
, Quad
, end_sizes
};
454 if (len
< sizeof(a
)) {
457 memcpy(&a
, data
, sizeof(a
));
459 if (get_mmio_address(&abs
, a
.base
, a
.offset
) == 0) {
463 switch (a
.size
%= end_sizes
) {
465 qtest_writeb(s
, abs
.addr
, a
.value
& 0xFF);
469 qtest_writew(s
, abs
.addr
, a
.value
& 0xFFFF);
474 qtest_writel(s
, abs
.addr
, a
.value
& 0xFFFFFFFF);
479 qtest_writeq(s
, abs
.addr
, a
.value
);
/*
 * Fuzz op: PCI config-space read.  Pick a fuzzable PCI device (a.base modulo
 * the device count), program the 0xCF8 config-address port (enable bit |
 * devfn << 8 | offset), then read 0xCFC at the chosen width.  No-op when PCI
 * fuzzing is disabled or no devices matched.
 * NOTE(review): extraction is missing the operand-struct definition, case
 * labels and breaks; code kept byte-identical.
 */
485 static void op_pci_read(QTestState
*s
, const unsigned char * data
, size_t len
)
487 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
493 if (len
< sizeof(a
) || fuzzable_pci_devices
->len
== 0 || pci_disabled
) {
496 memcpy(&a
, data
, sizeof(a
));
497 PCIDevice
*dev
= g_ptr_array_index(fuzzable_pci_devices
,
498 a
.base
% fuzzable_pci_devices
->len
);
499 int devfn
= dev
->devfn
;
500 qtest_outl(s
, PCI_HOST_BRIDGE_CFG
, (1U << 31) | (devfn
<< 8) | a
.offset
);
501 switch (a
.size
%= end_sizes
) {
503 qtest_inb(s
, PCI_HOST_BRIDGE_DATA
);
506 qtest_inw(s
, PCI_HOST_BRIDGE_DATA
);
509 qtest_inl(s
, PCI_HOST_BRIDGE_DATA
);
/*
 * Fuzz op: PCI config-space write.  Mirrors op_pci_read but writes a.value
 * (masked to the access width) to the 0xCFC data port.
 * NOTE(review): extraction is missing the operand-struct definition, case
 * labels and breaks; code kept byte-identical.
 */
514 static void op_pci_write(QTestState
*s
, const unsigned char * data
, size_t len
)
516 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
523 if (len
< sizeof(a
) || fuzzable_pci_devices
->len
== 0 || pci_disabled
) {
526 memcpy(&a
, data
, sizeof(a
));
527 PCIDevice
*dev
= g_ptr_array_index(fuzzable_pci_devices
,
528 a
.base
% fuzzable_pci_devices
->len
);
529 int devfn
= dev
->devfn
;
530 qtest_outl(s
, PCI_HOST_BRIDGE_CFG
, (1U << 31) | (devfn
<< 8) | a
.offset
);
531 switch (a
.size
%= end_sizes
) {
533 qtest_outb(s
, PCI_HOST_BRIDGE_DATA
, a
.value
& 0xFF);
536 qtest_outw(s
, PCI_HOST_BRIDGE_DATA
, a
.value
& 0xFFFF);
539 qtest_outl(s
, PCI_HOST_BRIDGE_DATA
, a
.value
& 0xFFFFFFFF);
/*
 * Fuzz op: append a DMA fill pattern.  The fixed-size header carries
 * index/stride; the remaining command bytes become the pattern data
 * (data + sizeof(a), length len - sizeof(a)).  index is wrapped modulo the
 * pattern length so it always addresses a valid byte.
 * NOTE(review): extraction is missing the operand-struct definition and the
 * length-check body; code kept byte-identical.
 */
544 static void op_add_dma_pattern(QTestState
*s
,
545 const unsigned char *data
, size_t len
)
549 * index and stride can be used to increment the index-th byte of the
550 * pattern by the value stride, for each loop of the pattern.
556 if (len
< sizeof(a
) + 1) {
559 memcpy(&a
, data
, sizeof(a
));
560 pattern p
= {a
.index
, a
.stride
, len
- sizeof(a
), data
+ sizeof(a
)};
561 p
.index
= a
.index
% p
.len
;
562 g_array_append_val(dma_patterns
, p
);
/*
 * Fuzz op: drop all registered DMA patterns and reset the round-robin
 * pattern index.  Also called directly from generic_fuzz() at the start of
 * each input (with data=NULL, len=0).
 */
566 static void op_clear_dma_patterns(QTestState
*s
,
567 const unsigned char *data
, size_t len
)
569 g_array_set_size(dma_patterns
, 0);
570 dma_pattern_index
= 0;
/*
 * Fuzz op: advance the virtual clock to the next pending timer deadline.
 * data/len are unused.
 */
573 static void op_clock_step(QTestState
*s
, const unsigned char *data
, size_t len
)
575 qtest_clock_step_next(s
);
/*
 * Fuzz op: disable further PCI config-space ops for the rest of this input.
 * NOTE(review): the function body is missing from this extract — presumably
 * it sets pci_disabled = true (op_pci_read/op_pci_write check that flag);
 * confirm against the full source before editing.
 */
578 static void op_disable_pci(QTestState
*s
, const unsigned char *data
, size_t len
)
/*
 * SIGALRM handler armed by generic_fuzz()'s setitimer: abandon inputs that
 * run past the timeout.  Defers exiting while libfuzzer is still talking to
 * an llvm-symbolizer child (see the comment below).
 * NOTE(review): extraction is missing the exit path after the waitpid check;
 * code kept byte-identical.
 */
583 static void handle_timeout(int sig
)
585 if (qtest_log_enabled
) {
586 fprintf(stderr
, "[Timeout]\n");
591 * If there is a crash, libfuzzer/ASAN forks a child to run an
592 * "llvm-symbolizer" process for printing out a pretty stacktrace. It
593 * communicates with this child using a pipe. If we timeout+Exit, while
594 * libfuzzer is still communicating with the llvm-symbolizer child, we will
595 * be left with an orphan llvm-symbolizer process. Sometimes, this appears
596 * to lead to a deadlock in the forkserver. Use waitpid to check if there
597 * are any waitable children. If so, exit out of the signal-handler, and
598 * let libfuzzer finish communicating with the child, and exit, on its own.
600 if (waitpid(-1, NULL
, WNOHANG
) == 0) {
608 * Here, we interpret random bytes from the fuzzer, as a sequence of commands.
609 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
610 * specify the boundaries between commands. SEPARATOR is used to separate
611 * "operations" in the fuzz input. Why use a separator, instead of just using
612 * the operations' length to identify operation boundaries?
613 * 1. This is a simple way to support variable-length operations
614 * 2. This adds "stability" to the input.
615 * For example take the input "AbBcgDefg", where there is no separator and
616 * Opcodes are capitalized.
617 * Simply, by removing the first byte, we end up with a very different
620 * By adding a separator, we avoid this problem:
621 * Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
622 * Since B uses two additional bytes as operands, the first "B" will be
623 * ignored. The fuzzer actively tries to reduce inputs, so such unused
624 * bytes are likely to be pruned, eventually.
626 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
627 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
628 * -dict), though this should not be necessary.
630 * As a result, the stream of bytes is converted into a sequence of commands.
631 * In a simplified example where SEPARATOR is 0xFF:
632 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
633 * becomes this sequence of commands:
634 * 00 01 02 -> op00 (0102) -> in (0102, 2)
635 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
636 * 01 -> op01 (-,0) -> out (-,0)
639 * Note here that it is the job of the individual opcode functions to check
640 * that enough data was provided. I.e. in the last command out (,0), out needs
641 * to check that there is not enough data provided to select an address/value
/*
 * Main fuzz entry point: split the input on SEPARATOR, dispatch each command
 * to the ops[] table by its first byte (modulo table size), arm a virtual
 * timer so slow inputs are abandoned, and reset per-input state
 * (DMA patterns, pci_disabled, dma_regions) around the loop.
 * NOTE(review): extraction is missing interleaved lines (several ops[]
 * initializers, local declarations, the main-loop call and loop tail);
 * code kept byte-identical — do not edit blindly.
 */
644 static void generic_fuzz(QTestState
*s
, const unsigned char *Data
, size_t Size
)
646 void (*ops
[]) (QTestState
*s
, const unsigned char* , size_t) = {
650 [OP_WRITE
] = op_write
,
651 [OP_PCI_READ
] = op_pci_read
,
652 [OP_PCI_WRITE
] = op_pci_write
,
653 [OP_DISABLE_PCI
] = op_disable_pci
,
654 [OP_ADD_DMA_PATTERN
] = op_add_dma_pattern
,
655 [OP_CLEAR_DMA_PATTERNS
] = op_clear_dma_patterns
,
656 [OP_CLOCK_STEP
] = op_clock_step
,
658 const unsigned char *cmd
= Data
;
659 const unsigned char *nextcmd
;
665 * Sometimes the fuzzer will find inputs that take quite a long time to
666 * process. Often times, these inputs do not result in new coverage.
667 * Even if these inputs might be interesting, they can slow down the
668 * fuzzer, overall. Set a timeout to avoid hurting performance, too much
671 struct sigaction sact
;
672 struct itimerval timer
;
674 sigemptyset(&sact
.sa_mask
);
675 sact
.sa_flags
= SA_NODEFER
;
676 sact
.sa_handler
= handle_timeout
;
677 sigaction(SIGALRM
, &sact
, NULL
);
679 memset(&timer
, 0, sizeof(timer
));
680 timer
.it_value
.tv_sec
= timeout
/ USEC_IN_SEC
;
681 timer
.it_value
.tv_usec
= timeout
% USEC_IN_SEC
;
682 setitimer(ITIMER_VIRTUAL
, &timer
, NULL
);
685 op_clear_dma_patterns(s
, NULL
, 0);
686 pci_disabled
= false;
688 while (cmd
&& Size
) {
689 /* Get the length until the next command or end of input */
690 nextcmd
= memmem(cmd
, Size
, SEPARATOR
, strlen(SEPARATOR
));
691 cmd_len
= nextcmd
? nextcmd
- cmd
: Size
;
694 /* Interpret the first byte of the command as an opcode */
695 op
= *cmd
% (sizeof(ops
) / sizeof((ops
)[0]));
696 ops
[op
](s
, cmd
+ 1, cmd_len
- 1);
698 /* Run the main loop */
701 /* Advance to the next command */
702 cmd
= nextcmd
? nextcmd
+ sizeof(SEPARATOR
) - 1 : nextcmd
;
703 Size
= Size
- (cmd_len
+ sizeof(SEPARATOR
) - 1);
704 g_array_set_size(dma_regions
, 0);
/*
 * Print the environment variables this fuzz target requires/accepts.
 * NOTE(review): extraction is missing the tail of this function (likely an
 * exit call); code kept byte-identical.
 */
713 static void usage(void)
715 printf("Please specify the following environment variables:\n");
716 printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
717 printf("QEMU_FUZZ_OBJECTS= "
718 "a space separated list of QOM type names for objects to fuzz\n");
719 printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
720 "Try to avoid racy DMA double fetch bugs? %d by default\n",
721 avoid_double_fetches
);
722 printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
723 "0 to disable. %d by default\n", timeout
);
/*
 * QOM child-iteration callback: record every MemoryRegion child that is not
 * plain RAM/ROM (i.e. a device I/O region) into fuzzable_memoryregions.
 * The hash table keys on the MR pointer, so re-inserting deduplicates.
 * NOTE(review): extraction is missing local declarations and the return;
 * code kept byte-identical.
 */
727 static int locate_fuzz_memory_regions(Object
*child
, void *opaque
)
731 if (object_dynamic_cast(child
, TYPE_MEMORY_REGION
)) {
732 mr
= MEMORY_REGION(child
);
733 if ((memory_region_is_ram(mr
) ||
734 memory_region_is_ram_device(mr
) ||
735 memory_region_is_rom(mr
)) == false) {
736 name
= object_get_canonical_path_component(child
);
738 * We don't want duplicate pointers to the same MemoryRegion, so
739 * try to remove copies of the pointer, before adding it.
741 g_hash_table_insert(fuzzable_memoryregions
, mr
, (gpointer
)true);
/*
 * QOM child-iteration callback: match objects against the user-supplied glob
 * pattern (from QEMU_FUZZ_OBJECTS).  Matches by type name collect child
 * MemoryRegions (and the PCIDevice itself, deduplicated); MemoryRegions can
 * also be matched directly by canonical path component.
 * NOTE(review): extraction is missing local declarations, some braces and
 * the return; code kept byte-identical.
 */
747 static int locate_fuzz_objects(Object
*child
, void *opaque
)
749 char *pattern
= opaque
;
750 if (g_pattern_match_simple(pattern
, object_get_typename(child
))) {
751 /* Find and save ptrs to any child MemoryRegions */
752 object_child_foreach_recursive(child
, locate_fuzz_memory_regions
, NULL
);
755 * We matched an object. If its a PCI device, store a pointer to it so
756 * we can map BARs and fuzz its config space.
758 if (object_dynamic_cast(OBJECT(child
), TYPE_PCI_DEVICE
)) {
760 * Don't want duplicate pointers to the same PCIDevice, so remove
761 * copies of the pointer, before adding it.
763 g_ptr_array_remove_fast(fuzzable_pci_devices
, PCI_DEVICE(child
));
764 g_ptr_array_add(fuzzable_pci_devices
, PCI_DEVICE(child
));
766 } else if (object_dynamic_cast(OBJECT(child
), TYPE_MEMORY_REGION
)) {
767 if (g_pattern_match_simple(pattern
,
768 object_get_canonical_path_component(child
))) {
770 mr
= MEMORY_REGION(child
);
771 if ((memory_region_is_ram(mr
) ||
772 memory_region_is_ram_device(mr
) ||
773 memory_region_is_rom(mr
)) == false) {
774 g_hash_table_insert(fuzzable_memoryregions
, mr
, (gpointer
)true);
/*
 * GPtrArray foreach callback: for each matched PCI device, map all non-empty
 * BARs (the 6 standard I/O regions) and enable the device via libqos.
 * NOTE(review): extraction is missing local declarations and closing braces;
 * code kept byte-identical.
 */
782 static void pci_enum(gpointer pcidev
, gpointer bus
)
784 PCIDevice
*dev
= pcidev
;
788 qdev
= qpci_device_find(bus
, dev
->devfn
);
789 g_assert(qdev
!= NULL
);
790 for (i
= 0; i
< 6; i
++) {
791 if (dev
->io_regions
[i
].size
) {
792 qpci_iomap(qdev
, i
, NULL
);
795 qpci_device_enable(qdev
);
/*
 * One-time setup before fuzzing: read configuration from the environment
 * (QEMU_FUZZ_OBJECTS required; QTEST_LOG, QEMU_AVOID_DOUBLE_FETCH,
 * QEMU_FUZZ_TIMEOUT optional), create the sparse-memory DMA backing region,
 * allocate the DMA bookkeeping containers, walk the QOM tree to collect
 * fuzzable MemoryRegions/PCI devices, print them, and map/enable PCI BARs.
 * NOTE(review): extraction is missing local declarations, usage()/exit
 * paths and several braces; code kept byte-identical — do not edit blindly.
 */
799 static void generic_pre_fuzz(QTestState
*s
)
806 if (!getenv("QEMU_FUZZ_OBJECTS")) {
809 if (getenv("QTEST_LOG")) {
810 qtest_log_enabled
= 1;
812 if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
813 avoid_double_fetches
= 1;
815 if (getenv("QEMU_FUZZ_TIMEOUT")) {
816 timeout
= g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL
, 0);
821 * Create a special device that we can use to back DMA buffers at very
822 * high memory addresses
824 sparse_mem_mr
= sparse_mem_init(0, UINT64_MAX
);
826 dma_regions
= g_array_new(false, false, sizeof(address_range
));
827 dma_patterns
= g_array_new(false, false, sizeof(pattern
));
829 fuzzable_memoryregions
= g_hash_table_new(NULL
, NULL
);
830 fuzzable_pci_devices
= g_ptr_array_new();
832 result
= g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
833 for (int i
= 0; result
[i
] != NULL
; i
++) {
834 printf("Matching objects by name %s\n", result
[i
]);
835 object_child_foreach_recursive(qdev_get_machine(),
840 printf("This process will try to fuzz the following MemoryRegions:\n");
842 g_hash_table_iter_init(&iter
, fuzzable_memoryregions
);
843 while (g_hash_table_iter_next(&iter
, (gpointer
)&mr
, NULL
)) {
844 printf(" * %s (size 0x%" PRIx64
")\n",
845 object_get_canonical_path_component(&(mr
->parent_obj
)),
846 memory_region_size(mr
));
849 if (!g_hash_table_size(fuzzable_memoryregions
)) {
850 printf("No fuzzable memory regions found...\n");
854 pcibus
= qpci_new_pc(s
, NULL
);
855 g_ptr_array_foreach(fuzzable_pci_devices
, pci_enum
, pcibus
);
856 qpci_free_pc(pcibus
);
862 * When libfuzzer gives us two inputs to combine, return a new input with the
863 * following structure:
867 * Clear out the DMA Patterns
869 * Disable the pci_read/write instructions
873 * The idea is to collate the core behaviors of the two inputs.
875 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
876 * device functionality A
877 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
880 * This function attempts to produce an input that:
881 * Output: maps a device's BARs, sets up three DMA patterns, triggers
882 * device functionality A, replaces the DMA patterns with a single
883 * pattern, and triggers device functionality B.
/*
 * libfuzzer crossover hook: concatenate input1, then SEPARATOR-delimited
 * OP_CLEAR_DMA_PATTERNS and OP_DISABLE_PCI opcodes, then input2 — so the
 * combined input runs input1's setup, resets DMA/PCI state, and replays
 * input2's behavior.  Returns the combined size (the seed parameter is
 * unused by this deterministic strategy).
 * NOTE(review): extraction is missing several lines (copy_len/size updates
 * between steps and the final return); code kept byte-identical.
 */
885 static size_t generic_fuzz_crossover(const uint8_t *data1
, size_t size1
, const
886 uint8_t *data2
, size_t size2
, uint8_t *out
,
887 size_t max_out_size
, unsigned int seed
)
889 size_t copy_len
= 0, size
= 0;
891 /* Check that we have enough space for data1 and at least part of data2 */
892 if (max_out_size
<= size1
+ strlen(SEPARATOR
) * 3 + 2) {
896 /* Copy_Len in the first input */
898 memcpy(out
+ size
, data1
, copy_len
);
900 max_out_size
-= copy_len
;
902 /* Append a separator */
903 copy_len
= strlen(SEPARATOR
);
904 memcpy(out
+ size
, SEPARATOR
, copy_len
);
906 max_out_size
-= copy_len
;
908 /* Clear out the DMA Patterns */
911 out
[size
] = OP_CLEAR_DMA_PATTERNS
;
914 max_out_size
-= copy_len
;
916 /* Append a separator */
917 copy_len
= strlen(SEPARATOR
);
918 memcpy(out
+ size
, SEPARATOR
, copy_len
);
920 max_out_size
-= copy_len
;
922 /* Disable PCI ops. Assume data1 took care of setting up PCI */
925 out
[size
] = OP_DISABLE_PCI
;
928 max_out_size
-= copy_len
;
930 /* Append a separator */
931 copy_len
= strlen(SEPARATOR
);
932 memcpy(out
+ size
, SEPARATOR
, copy_len
);
934 max_out_size
-= copy_len
;
936 /* Copy_Len over the second input */
937 copy_len
= MIN(size2
, max_out_size
);
938 memcpy(out
+ size
, data2
, copy_len
);
940 max_out_size
-= copy_len
;
/*
 * Build the QEMU command line for this target from QEMU_FUZZ_ARGS,
 * forcing -display none and the qtest accelerator.
 * NOTE(review): extraction is missing the usage()/exit branch and the
 * return; code kept byte-identical.
 */
946 static GString
*generic_fuzz_cmdline(FuzzTarget
*t
)
948 GString
*cmd_line
= g_string_new(TARGET_NAME
);
949 if (!getenv("QEMU_FUZZ_ARGS")) {
952 g_string_append_printf(cmd_line
, " -display none \
953 -machine accel=qtest, \
954 -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
/*
 * Command-line builder for the predefined-config targets: export the
 * config's args/objects through the same environment variables the generic
 * path reads, then delegate to generic_fuzz_cmdline().  Double-fetch
 * avoidance is enabled by default for predefined configs.
 * NOTE(review): extraction is missing local declarations and the else
 * branch structure; code kept byte-identical.
 */
958 static GString
*generic_fuzz_predefined_config_cmdline(FuzzTarget
*t
)
961 const generic_fuzz_config
*config
;
965 setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
966 if (config
->argfunc
) {
967 args
= config
->argfunc();
968 setenv("QEMU_FUZZ_ARGS", args
, 1);
971 g_assert_nonnull(config
->args
);
972 setenv("QEMU_FUZZ_ARGS", config
->args
, 1);
974 setenv("QEMU_FUZZ_OBJECTS", config
->objects
, 1);
975 return generic_fuzz_cmdline(t
);
/*
 * Register the base "generic-fuzz" target plus one "generic-fuzz-<name>"
 * target per entry in predefined_configs, all sharing the same pre_fuzz /
 * fuzz / crossover hooks; each predefined target stores its config in
 * .opaque.  fuzz_target_init() hooks registration into startup.
 * NOTE(review): extraction is missing closing braces, the loop header and
 * the .name initializer of the per-config target; code kept byte-identical.
 */
978 static void register_generic_fuzz_targets(void)
980 fuzz_add_target(&(FuzzTarget
){
981 .name
= "generic-fuzz",
982 .description
= "Fuzz based on any qemu command-line args. ",
983 .get_init_cmdline
= generic_fuzz_cmdline
,
984 .pre_fuzz
= generic_pre_fuzz
,
985 .fuzz
= generic_fuzz
,
986 .crossover
= generic_fuzz_crossover
990 const generic_fuzz_config
*config
;
993 i
< sizeof(predefined_configs
) / sizeof(generic_fuzz_config
);
995 config
= predefined_configs
+ i
;
996 name
= g_string_new("generic-fuzz");
997 g_string_append_printf(name
, "-%s", config
->name
);
998 fuzz_add_target(&(FuzzTarget
){
1000 .description
= "Predefined generic-fuzz config.",
1001 .get_init_cmdline
= generic_fuzz_predefined_config_cmdline
,
1002 .pre_fuzz
= generic_pre_fuzz
,
1003 .fuzz
= generic_fuzz
,
1004 .crossover
= generic_fuzz_crossover
,
1005 .opaque
= (void *)config
1010 fuzz_target_init(register_generic_fuzz_targets
);