arm64: kaslr: Reserve size of ARM64_MEMSTART_ALIGN in linear region
[linux-stable.git] / arch / arm64 / mm / init.c
blob 9e6c822d458dd825c1cadfb8083f403a4292970d

/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;
#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
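/*
 * E.g. "initrd=0x82000000,16M" records a physical load address of
 * 0x82000000 and an end of 0x83000000; memparse() accepts the usual
 * K/M/G suffixes for both fields.
 */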
#endif
#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
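/*
 * E.g. "crashkernel=512M" lets the kernel pick a 2 MB aligned base
 * below ARCH_LOW_ADDRESS_LIMIT, while "crashkernel=512M@0x60000000"
 * requests the reservation at a fixed base, subject to the memory and
 * alignment checks above.
 */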
static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of hibernation image, all the pages are
	 * marked as Reserved initially.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
			addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about primary kernel's core image and is used by a dump
 * capture kernel to access the system memory on primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
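/*
 * E.g. RAM starting at 0x80000000 gives offset == 0 and a limit of
 * 4 GB; RAM starting at 0x880000000 gives offset == 0x800000000, so
 * the limit becomes min(0x900000000, end of RAM), i.e. the first 4 GB
 * that 32-bit masters can reach through their DMA offset.
 */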
#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}
#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;
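
	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the given pfn:
	 * the shift above wraps otherwise, and the truncated address
	 * could falsely match a valid region.
	 */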
	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif
static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
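/*
 * E.g. "mem=2G" caps usable memory at 0x80000000 bytes and logs
 * "Memory limited to 2048MB"; values are rounded down to a page
 * boundary by the PAGE_MASK above.
 */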
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}
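/*
 * Example "chosen" node matched by the scan above, assuming two
 * address cells and two size cells; it restricts the kernel to
 * 256 MB of RAM at 0x880000000:
 *
 *	chosen {
 *		linux,usable-memory-range = <0x8 0x80000000 0x0 0x10000000>;
 *	};
 */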
static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;
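	/*
	 * PAGE_OFFSET sits at the bottom of the upper half of the VA space,
	 * so its two's complement is the size of the linear region: e.g.
	 * with VA_BITS == 48, PAGE_OFFSET is 0xffff800000000000 and
	 * -(s64)PAGE_OFFSET == 1 << 47.
	 */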

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}
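
	/*
	 * E.g. with a 32 GB linear region (VA_BITS == 36) and RAM covering
	 * [0, 40 GB), the round_up() above yields memstart_addr == 8 GB and
	 * the bottom 8 GB of RAM are removed, leaving [8 GB, 40 GB) mappable.
	 */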

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd to become inaccessible via the linear mapping.
		 * Otherwise, this is a no-op
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
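
	/*
	 * The 16-bit seed scales the slack computed above: with
	 * range == N chunks of ARM64_MEMSTART_ALIGN, the offset is at most
	 * (N - 1) * ARM64_MEMSTART_ALIGN, so one chunk of slack is always
	 * held back and the end of DRAM still fits inside the linear
	 * region -- the reservation referred to in the patch subject above.
	 */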

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */
/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
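	/*
	 * Each macro expands to a "base, top, size" triple for the
	 * pr_notice() formats below, with the size scaled to KB/MB/GB;
	 * MLK_ROUNDUP rounds up so sections smaller than 1 KB do not
	 * print as zero.
	 */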

	pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
	pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
	pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(MODULES_VADDR, MODULES_END));
	pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(VMALLOC_START, VMALLOC_END));
	pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(_text, _etext));
	pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__start_rodata, __init_begin));
	pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__init_begin, __init_end));
	pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(_sdata, _edata));
	pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__bss_start, __bss_stop));
	pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
		MLK(FIXADDR_START, FIXADDR_TOP));
	pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
	pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
		    (unsigned long)virt_to_page(high_memory)));
#endif
	pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(__phys_to_virt(memblock_start_of_DRAM()),
		    (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly when sizing the VMEMMAP array.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
void free_initmem(void)
{
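	/*
	 * The init region is freed through its linear-map alias so that
	 * the poisoning writes go through a mapping that stays live after
	 * the image-side alias is unmapped just below.
	 */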
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}

	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);