2 * Generic Virtual-Device Fuzzing Target
4 * Copyright Red Hat Inc., 2020
7 * Alexander Bulekov <alxndr@bu.edu>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
13 #include "qemu/osdep.h"
17 #include "hw/core/cpu.h"
18 #include "tests/qtest/libqos/libqtest.h"
19 #include "tests/qtest/libqos/pci-pc.h"
21 #include "fork_fuzz.h"
22 #include "exec/address-spaces.h"
24 #include "exec/memory.h"
25 #include "exec/ramblock.h"
26 #include "exec/address-spaces.h"
27 #include "hw/qdev-core.h"
28 #include "hw/pci/pci.h"
29 #include "hw/boards.h"
30 #include "generic_fuzz_configs.h"
31 #include "hw/mem/sparse-mem.h"
34 * SEPARATOR is used to separate "operations" in the fuzz input
36 #define SEPARATOR "FUZZ"
47 OP_CLEAR_DMA_PATTERNS
,
/* Default per-input timeout, in microseconds ("0 to disable", see usage()) */
#define DEFAULT_TIMEOUT_US 100000
/*
 * Microseconds per second. The timeout is specified in usec and split into
 * struct itimerval (tv_sec / tv_usec) in generic_fuzz(); the previous value
 * of 1000000000 was nanoseconds-per-second, so any timeout >= 1s produced
 * tv_usec >= 1000000, which is invalid for setitimer (EINVAL) and silently
 * disabled the timer.
 */
#define USEC_IN_SEC 1000000

/* Cap on how many bytes a single DMA fill may write into guest memory */
#define MAX_DMA_FILL_SIZE 0x10000

/* Standard x86 PCI host-bridge config-space I/O ports */
#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc
61 ram_addr_t size
; /* The number of bytes until the end of the I/O region */
64 static useconds_t timeout
= DEFAULT_TIMEOUT_US
;
66 static bool qtest_log_enabled
;
68 MemoryRegion
*sparse_mem_mr
;
71 * A pattern used to populate a DMA region or perform a memwrite. This is
72 * useful for e.g. populating tables of unique addresses.
73 * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
74 * Renders as: 00 01 02 00 03 02 00 05 02 00 07 02 ...
77 uint8_t index
; /* Index of a byte to increment by stride */
78 uint8_t stride
; /* Increment each index'th byte by this amount */
83 /* Avoid filling the same DMA region between MMIO/PIO commands ? */
84 static bool avoid_double_fetches
;
86 static QTestState
*qts_global
; /* Need a global for the DMA callback */
89 * List of memory regions that are children of QOM objects specified by the
92 static GHashTable
*fuzzable_memoryregions
;
93 static GPtrArray
*fuzzable_pci_devices
;
95 struct get_io_cb_info
{
101 static bool get_io_address_cb(Int128 start
, Int128 size
,
102 const MemoryRegion
*mr
,
103 hwaddr offset_in_region
,
106 struct get_io_cb_info
*info
= opaque
;
107 if (g_hash_table_lookup(fuzzable_memoryregions
, mr
)) {
108 if (info
->index
== 0) {
109 info
->result
.addr
= (ram_addr_t
)start
;
110 info
->result
.size
= (ram_addr_t
)size
;
120 * List of dma regions populated since the last fuzzing command. Used to ensure
121 * that we only write to each DMA address once, to avoid race conditions when
122 * building reproducers.
124 static GArray
*dma_regions
;
126 static GArray
*dma_patterns
;
127 static int dma_pattern_index
;
128 static bool pci_disabled
;
131 * Allocate a block of memory and populate it with a pattern.
133 static void *pattern_alloc(pattern p
, size_t len
)
136 uint8_t *buf
= g_malloc(len
);
139 for (i
= 0; i
< len
; ++i
) {
140 buf
[i
] = p
.data
[i
% p
.len
];
141 if ((i
% p
.len
) == p
.index
) {
149 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
151 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
154 * Regions are assumed to support 1-4 byte accesses unless
155 * otherwise specified.
157 if (access_size_max
== 0) {
161 /* Bound the maximum access by the alignment of the address. */
162 if (!mr
->ops
->impl
.unaligned
) {
163 unsigned align_size_max
= addr
& -addr
;
164 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
165 access_size_max
= align_size_max
;
169 /* Don't attempt accesses larger than the maximum. */
170 if (l
> access_size_max
) {
179 * Call-back for functions that perform DMA reads from guest memory. Confirm
180 * that the region has not already been populated since the last loop in
181 * generic_fuzz(), avoiding potential race-conditions, which we don't have
182 * a good way for reproducing right now.
184 void fuzz_dma_read_cb(size_t addr
, size_t len
, MemoryRegion
*mr
)
186 /* Are we in the generic-fuzzer or are we using another fuzz-target? */
192 * Return immediately if:
193 * - We have no DMA patterns defined
194 * - The length of the DMA read request is zero
195 * - The DMA read is hitting an MR other than the machine's main RAM
196 * - The DMA request hits past the bounds of our RAM
198 if (dma_patterns
->len
== 0
200 || (mr
!= current_machine
->ram
&& mr
!= sparse_mem_mr
)) {
205 * If we overlap with any existing dma_regions, split the range and only
206 * populate the non-overlapping parts.
208 address_range region
;
209 bool double_fetch
= false;
211 i
< dma_regions
->len
&& (avoid_double_fetches
|| qtest_log_enabled
);
213 region
= g_array_index(dma_regions
, address_range
, i
);
214 if (addr
< region
.addr
+ region
.size
&& addr
+ len
> region
.addr
) {
216 if (addr
< region
.addr
217 && avoid_double_fetches
) {
218 fuzz_dma_read_cb(addr
, region
.addr
- addr
, mr
);
220 if (addr
+ len
> region
.addr
+ region
.size
221 && avoid_double_fetches
) {
222 fuzz_dma_read_cb(region
.addr
+ region
.size
,
223 addr
+ len
- (region
.addr
+ region
.size
), mr
);
229 /* Cap the length of the DMA access to something reasonable */
230 len
= MIN(len
, MAX_DMA_FILL_SIZE
);
232 address_range ar
= {addr
, len
};
233 g_array_append_val(dma_regions
, ar
);
234 pattern p
= g_array_index(dma_patterns
, pattern
, dma_pattern_index
);
235 void *buf_base
= pattern_alloc(p
, ar
.size
);
236 void *buf
= buf_base
;
241 mr1
= address_space_translate(first_cpu
->as
,
242 addr
, &addr1
, &l
, true,
243 MEMTXATTRS_UNSPECIFIED
);
245 if (!(memory_region_is_ram(mr1
) ||
246 memory_region_is_romd(mr1
)) && mr1
!= sparse_mem_mr
) {
247 l
= memory_access_size(mr1
, l
, addr1
);
250 if (qtest_log_enabled
) {
252 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the log
253 * that will be written by qtest.c with a DMA tag, so we can reorder
254 * the resulting QTest trace so the DMA fills precede the last PIO/MMIO
257 fprintf(stderr
, "[DMA] ");
259 fprintf(stderr
, "[DOUBLE-FETCH] ");
263 qtest_memwrite(qts_global
, addr
, buf
, l
);
272 /* Increment the index of the pattern for the next DMA access */
273 dma_pattern_index
= (dma_pattern_index
+ 1) % dma_patterns
->len
;
277 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
278 * a physical address. To do this, we iterate over all of the matched
279 * MemoryRegions. Check whether each region exists within the particular io
280 * space. Return the absolute address of the offset within the index'th region
281 * that is a subregion of the io_space and the distance until the end of the
284 static bool get_io_address(address_range
*result
, AddressSpace
*as
,
288 view
= as
->current_map
;
290 struct get_io_cb_info cb_info
= {};
292 cb_info
.index
= index
;
295 * Loop around the FlatView until we match "index" number of
296 * fuzzable_memoryregions, or until we know that there are no matching
300 flatview_for_each_range(view
, get_io_address_cb
, &cb_info
);
301 } while (cb_info
.index
!= index
&& !cb_info
.found
);
303 *result
= cb_info
.result
;
305 offset
= offset
% result
->size
;
306 result
->addr
+= offset
;
307 result
->size
-= offset
;
309 return cb_info
.found
;
312 static bool get_pio_address(address_range
*result
,
313 uint8_t index
, uint16_t offset
)
316 * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
317 * can contain an addr that extends past the PIO space. When we pass this
318 * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
319 * up fuzzing a completely different MemoryRegion/Device. Therefore, check
320 * that the address here is within the PIO space limits.
322 bool found
= get_io_address(result
, &address_space_io
, index
, offset
);
323 return result
->addr
<= 0xFFFF ? found
: false;
326 static bool get_mmio_address(address_range
*result
,
327 uint8_t index
, uint32_t offset
)
329 return get_io_address(result
, &address_space_memory
, index
, offset
);
332 static void op_in(QTestState
*s
, const unsigned char * data
, size_t len
)
334 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
342 if (len
< sizeof(a
)) {
345 memcpy(&a
, data
, sizeof(a
));
346 if (get_pio_address(&abs
, a
.base
, a
.offset
) == 0) {
350 switch (a
.size
%= end_sizes
) {
352 qtest_inb(s
, abs
.addr
);
356 qtest_inw(s
, abs
.addr
);
361 qtest_inl(s
, abs
.addr
);
367 static void op_out(QTestState
*s
, const unsigned char * data
, size_t len
)
369 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
378 if (len
< sizeof(a
)) {
381 memcpy(&a
, data
, sizeof(a
));
383 if (get_pio_address(&abs
, a
.base
, a
.offset
) == 0) {
387 switch (a
.size
%= end_sizes
) {
389 qtest_outb(s
, abs
.addr
, a
.value
& 0xFF);
393 qtest_outw(s
, abs
.addr
, a
.value
& 0xFFFF);
398 qtest_outl(s
, abs
.addr
, a
.value
);
404 static void op_read(QTestState
*s
, const unsigned char * data
, size_t len
)
406 enum Sizes
{Byte
, Word
, Long
, Quad
, end_sizes
};
414 if (len
< sizeof(a
)) {
417 memcpy(&a
, data
, sizeof(a
));
419 if (get_mmio_address(&abs
, a
.base
, a
.offset
) == 0) {
423 switch (a
.size
%= end_sizes
) {
425 qtest_readb(s
, abs
.addr
);
429 qtest_readw(s
, abs
.addr
);
434 qtest_readl(s
, abs
.addr
);
439 qtest_readq(s
, abs
.addr
);
445 static void op_write(QTestState
*s
, const unsigned char * data
, size_t len
)
447 enum Sizes
{Byte
, Word
, Long
, Quad
, end_sizes
};
456 if (len
< sizeof(a
)) {
459 memcpy(&a
, data
, sizeof(a
));
461 if (get_mmio_address(&abs
, a
.base
, a
.offset
) == 0) {
465 switch (a
.size
%= end_sizes
) {
467 qtest_writeb(s
, abs
.addr
, a
.value
& 0xFF);
471 qtest_writew(s
, abs
.addr
, a
.value
& 0xFFFF);
476 qtest_writel(s
, abs
.addr
, a
.value
& 0xFFFFFFFF);
481 qtest_writeq(s
, abs
.addr
, a
.value
);
487 static void op_pci_read(QTestState
*s
, const unsigned char * data
, size_t len
)
489 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
495 if (len
< sizeof(a
) || fuzzable_pci_devices
->len
== 0 || pci_disabled
) {
498 memcpy(&a
, data
, sizeof(a
));
499 PCIDevice
*dev
= g_ptr_array_index(fuzzable_pci_devices
,
500 a
.base
% fuzzable_pci_devices
->len
);
501 int devfn
= dev
->devfn
;
502 qtest_outl(s
, PCI_HOST_BRIDGE_CFG
, (1U << 31) | (devfn
<< 8) | a
.offset
);
503 switch (a
.size
%= end_sizes
) {
505 qtest_inb(s
, PCI_HOST_BRIDGE_DATA
);
508 qtest_inw(s
, PCI_HOST_BRIDGE_DATA
);
511 qtest_inl(s
, PCI_HOST_BRIDGE_DATA
);
516 static void op_pci_write(QTestState
*s
, const unsigned char * data
, size_t len
)
518 enum Sizes
{Byte
, Word
, Long
, end_sizes
};
525 if (len
< sizeof(a
) || fuzzable_pci_devices
->len
== 0 || pci_disabled
) {
528 memcpy(&a
, data
, sizeof(a
));
529 PCIDevice
*dev
= g_ptr_array_index(fuzzable_pci_devices
,
530 a
.base
% fuzzable_pci_devices
->len
);
531 int devfn
= dev
->devfn
;
532 qtest_outl(s
, PCI_HOST_BRIDGE_CFG
, (1U << 31) | (devfn
<< 8) | a
.offset
);
533 switch (a
.size
%= end_sizes
) {
535 qtest_outb(s
, PCI_HOST_BRIDGE_DATA
, a
.value
& 0xFF);
538 qtest_outw(s
, PCI_HOST_BRIDGE_DATA
, a
.value
& 0xFFFF);
541 qtest_outl(s
, PCI_HOST_BRIDGE_DATA
, a
.value
& 0xFFFFFFFF);
546 static void op_add_dma_pattern(QTestState
*s
,
547 const unsigned char *data
, size_t len
)
551 * index and stride can be used to increment the index-th byte of the
552 * pattern by the value stride, for each loop of the pattern.
558 if (len
< sizeof(a
) + 1) {
561 memcpy(&a
, data
, sizeof(a
));
562 pattern p
= {a
.index
, a
.stride
, len
- sizeof(a
), data
+ sizeof(a
)};
563 p
.index
= a
.index
% p
.len
;
564 g_array_append_val(dma_patterns
, p
);
568 static void op_clear_dma_patterns(QTestState
*s
,
569 const unsigned char *data
, size_t len
)
571 g_array_set_size(dma_patterns
, 0);
572 dma_pattern_index
= 0;
575 static void op_clock_step(QTestState
*s
, const unsigned char *data
, size_t len
)
577 qtest_clock_step_next(s
);
580 static void op_disable_pci(QTestState
*s
, const unsigned char *data
, size_t len
)
585 static void handle_timeout(int sig
)
587 if (qtest_log_enabled
) {
588 fprintf(stderr
, "[Timeout]\n");
593 * If there is a crash, libfuzzer/ASAN forks a child to run an
594 * "llvm-symbolizer" process for printing out a pretty stacktrace. It
595 * communicates with this child using a pipe. If we timeout+Exit, while
596 * libfuzzer is still communicating with the llvm-symbolizer child, we will
597 * be left with an orphan llvm-symbolizer process. Sometimes, this appears
598 * to lead to a deadlock in the forkserver. Use waitpid to check if there
599 * are any waitable children. If so, exit out of the signal-handler, and
600 * let libfuzzer finish communicating with the child, and exit, on its own.
602 if (waitpid(-1, NULL
, WNOHANG
) == 0) {
610 * Here, we interpret random bytes from the fuzzer, as a sequence of commands.
611 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
612 * specify the boundaries between commands. SEPARATOR is used to separate
613 * "operations" in the fuzz input. Why use a separator, instead of just using
614 * the operations' length to identify operation boundaries?
615 * 1. This is a simple way to support variable-length operations
616 * 2. This adds "stability" to the input.
617 * For example take the input "AbBcgDefg", where there is no separator and
618 * Opcodes are capitalized.
619 * Simply, by removing the first byte, we end up with a very different
622 * By adding a separator, we avoid this problem:
623 * Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
624 * Since B uses two additional bytes as operands, the first "B" will be
625 * ignored. The fuzzer actively tries to reduce inputs, so such unused
626 * bytes are likely to be pruned, eventually.
628 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
629 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
630 * -dict), though this should not be necessary.
632 * As a result, the stream of bytes is converted into a sequence of commands.
633 * In a simplified example where SEPARATOR is 0xFF:
634 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
635 * becomes this sequence of commands:
636 * 00 01 02 -> op00 (0102) -> in (0102, 2)
637 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
638 * 01 -> op01 (-,0) -> out (-,0)
641 * Note here that it is the job of the individual opcode functions to check
642 * that enough data was provided. I.e. in the last command out (,0), out needs
643 * to check that there is not enough data provided to select an address/value
646 static void generic_fuzz(QTestState
*s
, const unsigned char *Data
, size_t Size
)
648 void (*ops
[]) (QTestState
*s
, const unsigned char* , size_t) = {
652 [OP_WRITE
] = op_write
,
653 [OP_PCI_READ
] = op_pci_read
,
654 [OP_PCI_WRITE
] = op_pci_write
,
655 [OP_DISABLE_PCI
] = op_disable_pci
,
656 [OP_ADD_DMA_PATTERN
] = op_add_dma_pattern
,
657 [OP_CLEAR_DMA_PATTERNS
] = op_clear_dma_patterns
,
658 [OP_CLOCK_STEP
] = op_clock_step
,
660 const unsigned char *cmd
= Data
;
661 const unsigned char *nextcmd
;
667 * Sometimes the fuzzer will find inputs that take quite a long time to
668 * process. Often times, these inputs do not result in new coverage.
669 * Even if these inputs might be interesting, they can slow down the
670 * fuzzer, overall. Set a timeout to avoid hurting performance, too much
673 struct sigaction sact
;
674 struct itimerval timer
;
676 sigemptyset(&sact
.sa_mask
);
677 sact
.sa_flags
= SA_NODEFER
;
678 sact
.sa_handler
= handle_timeout
;
679 sigaction(SIGALRM
, &sact
, NULL
);
681 memset(&timer
, 0, sizeof(timer
));
682 timer
.it_value
.tv_sec
= timeout
/ USEC_IN_SEC
;
683 timer
.it_value
.tv_usec
= timeout
% USEC_IN_SEC
;
684 setitimer(ITIMER_VIRTUAL
, &timer
, NULL
);
687 op_clear_dma_patterns(s
, NULL
, 0);
688 pci_disabled
= false;
690 while (cmd
&& Size
) {
691 /* Get the length until the next command or end of input */
692 nextcmd
= memmem(cmd
, Size
, SEPARATOR
, strlen(SEPARATOR
));
693 cmd_len
= nextcmd
? nextcmd
- cmd
: Size
;
696 /* Interpret the first byte of the command as an opcode */
697 op
= *cmd
% (sizeof(ops
) / sizeof((ops
)[0]));
698 ops
[op
](s
, cmd
+ 1, cmd_len
- 1);
700 /* Run the main loop */
703 /* Advance to the next command */
704 cmd
= nextcmd
? nextcmd
+ sizeof(SEPARATOR
) - 1 : nextcmd
;
705 Size
= Size
- (cmd_len
+ sizeof(SEPARATOR
) - 1);
706 g_array_set_size(dma_regions
, 0);
715 static void usage(void)
717 printf("Please specify the following environment variables:\n");
718 printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
719 printf("QEMU_FUZZ_OBJECTS= "
720 "a space separated list of QOM type names for objects to fuzz\n");
721 printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
722 "Try to avoid racy DMA double fetch bugs? %d by default\n",
723 avoid_double_fetches
);
724 printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
725 "0 to disable. %d by default\n", timeout
);
729 static int locate_fuzz_memory_regions(Object
*child
, void *opaque
)
733 if (object_dynamic_cast(child
, TYPE_MEMORY_REGION
)) {
734 mr
= MEMORY_REGION(child
);
735 if ((memory_region_is_ram(mr
) ||
736 memory_region_is_ram_device(mr
) ||
737 memory_region_is_rom(mr
)) == false) {
738 name
= object_get_canonical_path_component(child
);
740 * We don't want duplicate pointers to the same MemoryRegion, so
741 * try to remove copies of the pointer, before adding it.
743 g_hash_table_insert(fuzzable_memoryregions
, mr
, (gpointer
)true);
749 static int locate_fuzz_objects(Object
*child
, void *opaque
)
751 char *pattern
= opaque
;
752 if (g_pattern_match_simple(pattern
, object_get_typename(child
))) {
753 /* Find and save ptrs to any child MemoryRegions */
754 object_child_foreach_recursive(child
, locate_fuzz_memory_regions
, NULL
);
757 * We matched an object. If its a PCI device, store a pointer to it so
758 * we can map BARs and fuzz its config space.
760 if (object_dynamic_cast(OBJECT(child
), TYPE_PCI_DEVICE
)) {
762 * Don't want duplicate pointers to the same PCIDevice, so remove
763 * copies of the pointer, before adding it.
765 g_ptr_array_remove_fast(fuzzable_pci_devices
, PCI_DEVICE(child
));
766 g_ptr_array_add(fuzzable_pci_devices
, PCI_DEVICE(child
));
768 } else if (object_dynamic_cast(OBJECT(child
), TYPE_MEMORY_REGION
)) {
769 if (g_pattern_match_simple(pattern
,
770 object_get_canonical_path_component(child
))) {
772 mr
= MEMORY_REGION(child
);
773 if ((memory_region_is_ram(mr
) ||
774 memory_region_is_ram_device(mr
) ||
775 memory_region_is_rom(mr
)) == false) {
776 g_hash_table_insert(fuzzable_memoryregions
, mr
, (gpointer
)true);
784 static void pci_enum(gpointer pcidev
, gpointer bus
)
786 PCIDevice
*dev
= pcidev
;
790 qdev
= qpci_device_find(bus
, dev
->devfn
);
791 g_assert(qdev
!= NULL
);
792 for (i
= 0; i
< 6; i
++) {
793 if (dev
->io_regions
[i
].size
) {
794 qpci_iomap(qdev
, i
, NULL
);
797 qpci_device_enable(qdev
);
801 static void generic_pre_fuzz(QTestState
*s
)
808 if (!getenv("QEMU_FUZZ_OBJECTS")) {
811 if (getenv("QTEST_LOG")) {
812 qtest_log_enabled
= 1;
814 if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
815 avoid_double_fetches
= 1;
817 if (getenv("QEMU_FUZZ_TIMEOUT")) {
818 timeout
= g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL
, 0);
823 * Create a special device that we can use to back DMA buffers at very
824 * high memory addresses
826 sparse_mem_mr
= sparse_mem_init(0, UINT64_MAX
);
828 dma_regions
= g_array_new(false, false, sizeof(address_range
));
829 dma_patterns
= g_array_new(false, false, sizeof(pattern
));
831 fuzzable_memoryregions
= g_hash_table_new(NULL
, NULL
);
832 fuzzable_pci_devices
= g_ptr_array_new();
834 result
= g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
835 for (int i
= 0; result
[i
] != NULL
; i
++) {
836 printf("Matching objects by name %s\n", result
[i
]);
837 object_child_foreach_recursive(qdev_get_machine(),
842 printf("This process will try to fuzz the following MemoryRegions:\n");
844 g_hash_table_iter_init(&iter
, fuzzable_memoryregions
);
845 while (g_hash_table_iter_next(&iter
, (gpointer
)&mr
, NULL
)) {
846 printf(" * %s (size %lx)\n",
847 object_get_canonical_path_component(&(mr
->parent_obj
)),
851 if (!g_hash_table_size(fuzzable_memoryregions
)) {
852 printf("No fuzzable memory regions found...\n");
856 pcibus
= qpci_new_pc(s
, NULL
);
857 g_ptr_array_foreach(fuzzable_pci_devices
, pci_enum
, pcibus
);
858 qpci_free_pc(pcibus
);
864 * When libfuzzer gives us two inputs to combine, return a new input with the
865 * following structure:
869 * Clear out the DMA Patterns
871 * Disable the pci_read/write instructions
875 * The idea is to collate the core behaviors of the two inputs.
877 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
878 * device functionality A
879 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
882 * This function attempts to produce an input that:
883 * Ouptut: maps a device's BARs, set up three DMA patterns, triggers
884 * functionality A device, replaces the DMA patterns with a single
885 * patten, and triggers device functionality B.
887 static size_t generic_fuzz_crossover(const uint8_t *data1
, size_t size1
, const
888 uint8_t *data2
, size_t size2
, uint8_t *out
,
889 size_t max_out_size
, unsigned int seed
)
891 size_t copy_len
= 0, size
= 0;
893 /* Check that we have enough space for data1 and at least part of data2 */
894 if (max_out_size
<= size1
+ strlen(SEPARATOR
) * 3 + 2) {
898 /* Copy_Len in the first input */
900 memcpy(out
+ size
, data1
, copy_len
);
902 max_out_size
-= copy_len
;
904 /* Append a separator */
905 copy_len
= strlen(SEPARATOR
);
906 memcpy(out
+ size
, SEPARATOR
, copy_len
);
908 max_out_size
-= copy_len
;
910 /* Clear out the DMA Patterns */
913 out
[size
] = OP_CLEAR_DMA_PATTERNS
;
916 max_out_size
-= copy_len
;
918 /* Append a separator */
919 copy_len
= strlen(SEPARATOR
);
920 memcpy(out
+ size
, SEPARATOR
, copy_len
);
922 max_out_size
-= copy_len
;
924 /* Disable PCI ops. Assume data1 took care of setting up PCI */
927 out
[size
] = OP_DISABLE_PCI
;
930 max_out_size
-= copy_len
;
932 /* Append a separator */
933 copy_len
= strlen(SEPARATOR
);
934 memcpy(out
+ size
, SEPARATOR
, copy_len
);
936 max_out_size
-= copy_len
;
938 /* Copy_Len over the second input */
939 copy_len
= MIN(size2
, max_out_size
);
940 memcpy(out
+ size
, data2
, copy_len
);
942 max_out_size
-= copy_len
;
948 static GString
*generic_fuzz_cmdline(FuzzTarget
*t
)
950 GString
*cmd_line
= g_string_new(TARGET_NAME
);
951 if (!getenv("QEMU_FUZZ_ARGS")) {
954 g_string_append_printf(cmd_line
, " -display none \
955 -machine accel=qtest, \
956 -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
960 static GString
*generic_fuzz_predefined_config_cmdline(FuzzTarget
*t
)
963 const generic_fuzz_config
*config
;
967 setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
968 if (config
->argfunc
) {
969 args
= config
->argfunc();
970 setenv("QEMU_FUZZ_ARGS", args
, 1);
973 g_assert_nonnull(config
->args
);
974 setenv("QEMU_FUZZ_ARGS", config
->args
, 1);
976 setenv("QEMU_FUZZ_OBJECTS", config
->objects
, 1);
977 return generic_fuzz_cmdline(t
);
980 static void register_generic_fuzz_targets(void)
982 fuzz_add_target(&(FuzzTarget
){
983 .name
= "generic-fuzz",
984 .description
= "Fuzz based on any qemu command-line args. ",
985 .get_init_cmdline
= generic_fuzz_cmdline
,
986 .pre_fuzz
= generic_pre_fuzz
,
987 .fuzz
= generic_fuzz
,
988 .crossover
= generic_fuzz_crossover
992 const generic_fuzz_config
*config
;
995 i
< sizeof(predefined_configs
) / sizeof(generic_fuzz_config
);
997 config
= predefined_configs
+ i
;
998 name
= g_string_new("generic-fuzz");
999 g_string_append_printf(name
, "-%s", config
->name
);
1000 fuzz_add_target(&(FuzzTarget
){
1002 .description
= "Predefined generic-fuzz config.",
1003 .get_init_cmdline
= generic_fuzz_predefined_config_cmdline
,
1004 .pre_fuzz
= generic_pre_fuzz
,
1005 .fuzz
= generic_fuzz
,
1006 .crossover
= generic_fuzz_crossover
,
1007 .opaque
= (void *)config
1012 fuzz_target_init(register_generic_fuzz_targets
);