/*
 * Generic Virtual-Device Fuzzing Target
 *
 * Copyright Red Hat Inc., 2020
 *
 * Alexander Bulekov <alxndr@bu.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "hw/core/cpu.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/boards.h"
#include "generic_fuzz_configs.h"
#include "hw/mem/sparse-mem.h"

/*
 * SEPARATOR is used to separate "operations" in the fuzz input
 */
#define SEPARATOR "FUZZ"

enum {
    OP_IN,
    OP_OUT,
    OP_READ,
    OP_WRITE,
    OP_PCI_READ,
    OP_PCI_WRITE,
    OP_DISABLE_PCI,
    OP_ADD_DMA_PATTERN,
    OP_CLEAR_DMA_PATTERNS,
    OP_CLOCK_STEP,
};

#define DEFAULT_TIMEOUT_US 100000
#define USEC_IN_SEC 1000000

#define MAX_DMA_FILL_SIZE 0x10000

#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc
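
/*
 * 0xcf8/0xcfc are the standard CONFIG_ADDRESS/CONFIG_DATA ports of PCI
 * Configuration Space Access Mechanism #1. op_pci_read()/op_pci_write()
 * below drive them directly, roughly like this (illustrative values only):
 *
 *   qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | offset);
 *   qtest_inl(s, PCI_HOST_BRIDGE_DATA);
 */
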
typedef struct {
    ram_addr_t addr;
    ram_addr_t size; /* The number of bytes until the end of the I/O region */
} address_range;

static useconds_t timeout = DEFAULT_TIMEOUT_US;

static bool qtest_log_enabled;

MemoryRegion *sparse_mem_mr;

/*
 * A pattern used to populate a DMA region or perform a memwrite. This is
 * useful for e.g. populating tables of unique addresses.
 * Example {.index = 1, .stride = 2, .len = 3, .data = "\x00\x01\x02"}
 * Renders as: 00 01 02 00 03 02 00 05 02 00 07 02 ...
 */
typedef struct {
    uint8_t index;  /* Index of a byte to increment by stride */
    uint8_t stride; /* Increment each index'th byte by this amount */
    size_t len;
    const uint8_t *data;
} pattern;

/* Avoid filling the same DMA region between MMIO/PIO commands ? */
static bool avoid_double_fetches;

static QTestState *qts_global; /* Need a global for the DMA callback */

/*
 * List of memory regions that are children of QOM objects specified by the
 * user for fuzzing.
 */
static GHashTable *fuzzable_memoryregions;
static GPtrArray *fuzzable_pci_devices;

struct get_io_cb_info {
    int index;
    int found;
    address_range result;
};

static bool get_io_address_cb(Int128 start, Int128 size,
                              const MemoryRegion *mr,
                              hwaddr offset_in_region,
                              void *opaque)
{
    struct get_io_cb_info *info = opaque;
    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
        if (info->index == 0) {
            info->result.addr = (ram_addr_t)start;
            info->result.size = (ram_addr_t)size;
            info->found = 1;
            return true;
        }
        info->index--;
    }
    return false;
}

/*
 * List of dma regions populated since the last fuzzing command. Used to ensure
 * that we only write to each DMA address once, to avoid race conditions when
 * building reproducers.
 */
static GArray *dma_regions;

static GArray *dma_patterns;
static int dma_pattern_index;
static bool pci_disabled;

/*
 * Allocate a block of memory and populate it with a pattern.
 */
static void *pattern_alloc(pattern p, size_t len)
{
    int i;
    uint8_t *buf = g_malloc(len);
    uint8_t sum = 0;

    for (i = 0; i < len; ++i) {
        buf[i] = p.data[i % p.len];
        if ((i % p.len) == p.index) {
            buf[i] += sum;
            sum += p.stride;
        }
    }
    return buf;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /*
     * Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address. */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}

/*
 * Call-back for functions that perform DMA reads from guest memory. Confirm
 * that the region has not already been populated since the last loop in
 * generic_fuzz(), avoiding potential race-conditions, which we don't have
 * a good way for reproducing right now.
 */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    if (!qts_global) {
        return;
    }

    /*
     * Return immediately if:
     * - We have no DMA patterns defined
     * - The length of the DMA read request is zero
     * - The DMA read is hitting an MR other than the machine's main RAM
     * - The DMA request hits past the bounds of our RAM
     */
    if (dma_patterns->len == 0
        || len == 0
        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
        return;
    }

    /*
     * If we overlap with any existing dma_regions, split the range and only
     * populate the non-overlapping parts.
     */
    address_range region;
    bool double_fetch = false;
    for (int i = 0;
         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
         ++i) {
        region = g_array_index(dma_regions, address_range, i);
        if (addr < region.addr + region.size && addr + len > region.addr) {
            double_fetch = true;
            if (addr < region.addr
                && avoid_double_fetches) {
                fuzz_dma_read_cb(addr, region.addr - addr, mr);
            }
            if (addr + len > region.addr + region.size
                && avoid_double_fetches) {
                fuzz_dma_read_cb(region.addr + region.size,
                                 addr + len - (region.addr + region.size), mr);
            }
            return;
        }
    }

    /* Cap the length of the DMA access to something reasonable */
    len = MIN(len, MAX_DMA_FILL_SIZE);

    address_range ar = {addr, len};
    g_array_append_val(dma_regions, ar);
    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    void *buf_base = pattern_alloc(p, ar.size);
    uint8_t *buf = buf_base;
    hwaddr l, addr1;
    MemoryRegion *mr1;
    while (len > 0) {
        l = len;
        mr1 = address_space_translate(first_cpu->as,
                                      addr, &addr1, &l, true,
                                      MEMTXATTRS_UNSPECIFIED);

        /*
         * If mr1 isn't RAM, address_space_translate doesn't update l. Use
         * memory_access_size to identify the number of bytes that it is safe
         * to write without accidentally writing to another MemoryRegion.
         */
        if (!memory_region_is_ram(mr1)) {
            l = memory_access_size(mr1, l, addr1);
        }
        if (memory_region_is_ram(mr1) ||
            memory_region_is_romd(mr1) ||
            mr1 == sparse_mem_mr) {
            if (qtest_log_enabled) {
                /*
                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the log
                 * that will be written by qtest.c with a DMA tag, so we can reorder
                 * the resulting QTest trace so the DMA fills precede the last PIO/MMIO
                 * command.
                 */
                fprintf(stderr, "[DMA] ");
                if (double_fetch) {
                    fprintf(stderr, "[DOUBLE-FETCH] ");
                }
                fflush(stderr);
            }
            qtest_memwrite(qts_global, addr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    g_free(buf_base);

    /* Increment the index of the pattern for the next DMA access */
    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
}

/*
 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
 * a physical address. To do this, we iterate over all of the matched
 * MemoryRegions. Check whether each region exists within the particular io
 * space. Return the absolute address of the offset within the index'th region
 * that is a subregion of the io_space and the distance until the end of the
 * MemoryRegion.
 */
static bool get_io_address(address_range *result, AddressSpace *as,
                           uint8_t index, uint32_t offset)
{
    FlatView *view;
    view = as->current_map;
    g_assert(view);
    struct get_io_cb_info cb_info = {};

    cb_info.index = index;

    /*
     * Loop around the FlatView until we match "index" number of
     * fuzzable_memoryregions, or until we know that there are no matching
     * memory_regions.
     */
    do {
        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    } while (cb_info.index != index && !cb_info.found);

    *result = cb_info.result;
    if (cb_info.found) {
        offset = offset % result->size;
        result->addr += offset;
        result->size -= offset;
    }
    return cb_info.found;
}

static bool get_pio_address(address_range *result,
                            uint8_t index, uint16_t offset)
{
    /*
     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
     * can contain an addr that extends past the PIO space. When we pass this
     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
     * that the address here is within the PIO space limits.
     */
    bool found = get_io_address(result, &address_space_io, index, offset);
    return result->addr <= 0xFFFF ? found : false;
}

static bool get_mmio_address(address_range *result,
                             uint8_t index, uint32_t offset)
{
    return get_io_address(result, &address_space_memory, index, offset);
}

static void op_in(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_inw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_inl(s, abs.addr);
        }
        break;
    }
}

static void op_out(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
        uint32_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_outw(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_outl(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_readb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_readw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_readl(s, abs.addr);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_readq(s, abs.addr);
        }
        break;
    }
}

static void op_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
        uint64_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_writeb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_writew(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_writeq(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_pci_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
    } a;

    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Word:
        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Long:
        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
        break;
    }
}

static void op_pci_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
        uint32_t value;
    } a;

    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
        break;
    case Word:
        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
        break;
    case Long:
        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
        break;
    }
}

static void op_add_dma_pattern(QTestState *s,
                               const unsigned char *data, size_t len)
{
    struct {
        /*
         * index and stride can be used to increment the index-th byte of the
         * pattern by the value stride, for each loop of the pattern.
         */
        uint8_t index;
        uint8_t stride;
    } a;

    if (len < sizeof(a) + 1) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    p.index = a.index % p.len;
    g_array_append_val(dma_patterns, p);
}

static void op_clear_dma_patterns(QTestState *s,
                                  const unsigned char *data, size_t len)
{
    g_array_set_size(dma_patterns, 0);
    dma_pattern_index = 0;
}

static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
{
    qtest_clock_step_next(s);
}

static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
{
    pci_disabled = true;
}

static void handle_timeout(int sig)
{
    if (qtest_log_enabled) {
        fprintf(stderr, "[Timeout]\n");
        fflush(stderr);
    }

    /*
     * If there is a crash, libfuzzer/ASAN forks a child to run an
     * "llvm-symbolizer" process for printing out a pretty stacktrace. It
     * communicates with this child using a pipe. If we timeout+Exit, while
     * libfuzzer is still communicating with the llvm-symbolizer child, we will
     * be left with an orphan llvm-symbolizer process. Sometimes, this appears
     * to lead to a deadlock in the forkserver. Use waitpid to check if there
     * are any waitable children. If so, exit out of the signal-handler, and
     * let libfuzzer finish communicating with the child, and exit, on its own.
     */
    if (waitpid(-1, NULL, WNOHANG) == 0) {
        _Exit(0);
    }
}

/*
 * Here, we interpret random bytes from the fuzzer, as a sequence of commands.
 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
 * specify the boundaries between commands. SEPARATOR is used to separate
 * "operations" in the fuzz input. Why use a separator, instead of just using
 * the operations' length to identify operation boundaries?
 * 1. This is a simple way to support variable-length operations
 * 2. This adds "stability" to the input.
 *    For example take the input "AbBcgDefg", where there is no separator and
 *    Opcodes are capitalized.
 *    Simply, by removing the first byte, we end up with a very different
 *    input, since every subsequent opcode boundary shifts.
 *    By adding a separator, we avoid this problem:
 *    Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
 *    Since B uses two additional bytes as operands, the first "B" will be
 *    ignored. The fuzzer actively tries to reduce inputs, so such unused
 *    bytes are likely to be pruned, eventually.
 *
 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
 * -dict), though this should not be necessary.
 *
 * As a result, the stream of bytes is converted into a sequence of commands.
 * In a simplified example where SEPARATOR is 0xFF:
 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
 * becomes this sequence of commands:
 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
 * 01          -> op01 (-, 0)   -> out (-, 0)
 *
 * Note here that it is the job of the individual opcode functions to check
 * that enough data was provided. I.e. in the last command out (-, 0), out needs
 * to check that there is not enough data provided to select an address/value,
 * and simply return without performing any access.
 */
static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    void (*ops[]) (QTestState *s, const unsigned char *, size_t) = {
        [OP_IN]                 = op_in,
        [OP_OUT]                = op_out,
        [OP_READ]               = op_read,
        [OP_WRITE]              = op_write,
        [OP_PCI_READ]           = op_pci_read,
        [OP_PCI_WRITE]          = op_pci_write,
        [OP_DISABLE_PCI]        = op_disable_pci,
        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
        [OP_CLOCK_STEP]         = op_clock_step,
    };
    const unsigned char *cmd = Data;
    const unsigned char *nextcmd;
    size_t cmd_len;
    uint8_t op;

    struct sigaction sact;
    struct itimerval timer;
    sigset_t set;

    /*
     * Sometimes the fuzzer will find inputs that take quite a long time to
     * process. Often times, these inputs do not result in new coverage.
     * Even if these inputs might be interesting, they can slow down the
     * fuzzer, overall. Set a timeout for each command to avoid hurting
     * performance, too much.
     */
    if (timeout) {
        sigemptyset(&sact.sa_mask);
        sact.sa_flags   = SA_NODEFER;
        sact.sa_handler = handle_timeout;
        sigaction(SIGALRM, &sact, NULL);

        sigemptyset(&set);
        sigaddset(&set, SIGALRM);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);

        memset(&timer, 0, sizeof(timer));
        timer.it_value.tv_sec = timeout / USEC_IN_SEC;
        timer.it_value.tv_usec = timeout % USEC_IN_SEC;
    }

    op_clear_dma_patterns(s, NULL, 0);
    pci_disabled = false;

    while (cmd && Size) {
        /* Reset the timeout, each time we run a new command */
        if (timeout) {
            setitimer(ITIMER_REAL, &timer, NULL);
        }

        /* Get the length until the next command or end of input */
        nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
        cmd_len = nextcmd ? nextcmd - cmd : Size;

        if (cmd_len > 0) {
            /* Interpret the first byte of the command as an opcode */
            op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
            ops[op](s, cmd + 1, cmd_len - 1);

            /* Run the main loop */
            flush_events(s);
        }
        /* Advance to the next command */
        cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
        Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
        g_array_set_size(dma_regions, 0);
    }
}

static void usage(void)
{
    printf("Please specify the following environment variables:\n");
    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    printf("QEMU_FUZZ_OBJECTS= "
           "a space separated list of QOM type names for objects to fuzz\n");
    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
           "Try to avoid racy DMA double fetch bugs? %d by default\n",
           avoid_double_fetches);
    printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
           "0 to disable. %d by default\n", timeout);
    exit(0);
}

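/*
 * Example invocation (hypothetical values; the fuzzer binary name and the
 * device/object patterns below are placeholders, not taken from this file):
 *
 *   QEMU_FUZZ_ARGS='-machine q35 -nodefaults -device virtio-net-pci' \
 *   QEMU_FUZZ_OBJECTS='virtio*' \
 *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
 */
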
static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
    MemoryRegion *mr;
    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
        mr = MEMORY_REGION(child);
        if ((memory_region_is_ram(mr) ||
             memory_region_is_ram_device(mr) ||
             memory_region_is_rom(mr)) == false) {
            /*
             * We don't want duplicate pointers to the same MemoryRegion, so
             * try to remove copies of the pointer, before adding it.
             */
            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
        }
    }
    return 0;
}

static int locate_fuzz_objects(Object *child, void *opaque)
{
    GString *type_name;
    GString *path_name;
    MemoryRegion *mr;
    char *pattern = opaque;

    type_name = g_string_new(object_get_typename(child));
    g_string_ascii_down(type_name);
    if (g_pattern_match_simple(pattern, type_name->str)) {
        /* Find and save ptrs to any child MemoryRegions */
        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);

        /*
         * We matched an object. If it's a PCI device, store a pointer to it so
         * we can map BARs and fuzz its config space.
         */
        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
            /*
             * Don't want duplicate pointers to the same PCIDevice, so remove
             * copies of the pointer, before adding it.
             */
            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
        }
    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
        path_name = g_string_new(object_get_canonical_path_component(child));
        g_string_ascii_down(path_name);
        if (g_pattern_match_simple(pattern, path_name->str)) {
            mr = MEMORY_REGION(child);
            if ((memory_region_is_ram(mr) ||
                 memory_region_is_ram_device(mr) ||
                 memory_region_is_rom(mr)) == false) {
                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
            }
        }
        g_string_free(path_name, true);
    }
    g_string_free(type_name, true);
    return 0;
}

static void pci_enum(gpointer pcidev, gpointer bus)
{
    PCIDevice *dev = pcidev;
    QPCIDevice *qdev;
    int i;

    qdev = qpci_device_find(bus, dev->devfn);
    g_assert(qdev != NULL);
    for (i = 0; i < 6; i++) {
        if (dev->io_regions[i].size) {
            qpci_iomap(qdev, i, NULL);
        }
    }
    qpci_device_enable(qdev);
    g_free(qdev);
}

static void generic_pre_fuzz(QTestState *s)
{
    GHashTableIter iter;
    MemoryRegion *mr;
    QPCIBus *pcibus;
    char **result;
    GString *name_pattern;

    if (!getenv("QEMU_FUZZ_OBJECTS")) {
        usage();
    }
    if (getenv("QTEST_LOG")) {
        qtest_log_enabled = 1;
    }
    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
        avoid_double_fetches = 1;
    }
    if (getenv("QEMU_FUZZ_TIMEOUT")) {
        timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
    }
    qts_global = s;

    /*
     * Create a special device that we can use to back DMA buffers at very
     * high memory addresses
     */
    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);

    dma_regions = g_array_new(false, false, sizeof(address_range));
    dma_patterns = g_array_new(false, false, sizeof(pattern));

    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    fuzzable_pci_devices = g_ptr_array_new();

    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    for (int i = 0; result[i] != NULL; i++) {
        name_pattern = g_string_new(result[i]);
        /*
         * Make the pattern lowercase. We do the same for all the MemoryRegion
         * and Type names so the configs are case-insensitive.
         */
        g_string_ascii_down(name_pattern);
        printf("Matching objects by name %s\n", result[i]);
        object_child_foreach_recursive(qdev_get_machine(),
                                       locate_fuzz_objects,
                                       name_pattern->str);
        g_string_free(name_pattern, true);
    }
    g_strfreev(result);

    printf("This process will try to fuzz the following MemoryRegions:\n");
    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
        printf(" * %s (size 0x%" PRIx64 ")\n",
               object_get_canonical_path_component(&(mr->parent_obj)),
               memory_region_size(mr));
    }

    if (!g_hash_table_size(fuzzable_memoryregions)) {
        printf("No fuzzable memory regions found...\n");
        exit(1);
    }

    pcibus = qpci_new_pc(s, NULL);
    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    qpci_free_pc(pcibus);
}

/*
 * When libfuzzer gives us two inputs to combine, return a new input with the
 * following structure:
 *
 * Input 1 (data1)
 * SEPARATOR
 * Clear out the DMA Patterns
 * SEPARATOR
 * Disable the pci_read/write instructions
 * SEPARATOR
 * Input 2 (data2)
 *
 * The idea is to collate the core behaviors of the two inputs.
 * For example:
 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
 *          device functionality A
 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
 *          functionality B
 *
 * This function attempts to produce an input that:
 * Output: maps a device's BARs, sets up three DMA patterns, triggers
 *         device functionality A, replaces the DMA patterns with a single
 *         pattern, and triggers device functionality B.
 */
static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1,
                                     const uint8_t *data2, size_t size2,
                                     uint8_t *out, size_t max_out_size,
                                     unsigned int seed)
{
    size_t copy_len = 0, size = 0;

    /* Check that we have enough space for data1 and at least part of data2 */
    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
        return 0;
    }

    /* Copy over the first input */
    copy_len = size1;
    memcpy(out + size, data1, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Clear out the DMA Patterns */
    copy_len = 1;
    out[size] = OP_CLEAR_DMA_PATTERNS;
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    copy_len = 1;
    out[size] = OP_DISABLE_PCI;
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Copy over as much of the second input as fits */
    copy_len = MIN(size2, max_out_size);
    memcpy(out + size, data2, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    return size;
}

static GString *generic_fuzz_cmdline(FuzzTarget *t)
{
    GString *cmd_line = g_string_new(TARGET_NAME);
    if (!getenv("QEMU_FUZZ_ARGS")) {
        usage();
    }
    g_string_append_printf(cmd_line, " -display none \
                                      -machine accel=qtest, \
                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    return cmd_line;
}

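/*
 * For instance, with QEMU_FUZZ_ARGS='-device virtio-net-pci' (a hypothetical
 * value), the resulting command line is approximately:
 *
 *   <TARGET_NAME> -display none -machine accel=qtest, -m 512M -device virtio-net-pci
 */
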
static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
{
    gchar *args;
    const generic_fuzz_config *config;
    g_assert(t->opaque);

    config = t->opaque;
    setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    if (config->argfunc) {
        args = config->argfunc();
        setenv("QEMU_FUZZ_ARGS", args, 1);
        g_free(args);
    } else {
        g_assert_nonnull(config->args);
        setenv("QEMU_FUZZ_ARGS", config->args, 1);
    }
    setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
    return generic_fuzz_cmdline(t);
}

static void register_generic_fuzz_targets(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "generic-fuzz",
            .description = "Fuzz based on any qemu command-line args. ",
            .get_init_cmdline = generic_fuzz_cmdline,
            .pre_fuzz = generic_pre_fuzz,
            .fuzz = generic_fuzz,
            .crossover = generic_fuzz_crossover
    });

    GString *name;
    const generic_fuzz_config *config;

    for (int i = 0;
         i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
         i++) {
        config = predefined_configs + i;
        name = g_string_new("generic-fuzz");
        g_string_append_printf(name, "-%s", config->name);
        fuzz_add_target(&(FuzzTarget){
                .name = name->str,
                .description = "Predefined generic-fuzz config.",
                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
                .pre_fuzz = generic_pre_fuzz,
                .fuzz = generic_fuzz,
                .crossover = generic_fuzz_crossover,
                .opaque = (void *)config
        });
    }
}

fuzz_target_init(register_generic_fuzz_targets);