/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (system_state == SYSTEM_BOOTING ?
				x > MAXMEM : !phys_addr_valid(x)) {
			return false;
		}
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
		is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

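/*
 * Usage sketch (illustrative only, not part of this file): __phys_addr()
 * backs the __pa()/virt_to_phys() macros on x86, and __virt_addr_valid()
 * backs the virt_addr_valid() macro. A direct-mapping round trip looks
 * like this; the helper name is hypothetical.
 */
#if 0	/* example only */
static void example_round_trip(void *kaddr)
{
	if (__virt_addr_valid((unsigned long)kaddr)) {
		unsigned long phys = __phys_addr((unsigned long)kaddr);
		void *again = __va(phys);	/* back to the same pointer */

		BUG_ON(again != kaddr);
	}
}
#endif
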
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

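/*
 * Usage sketch (illustrative only, not part of this file):
 * pagerange_is_ram() returns 1 if every page in [start, end) is RAM,
 * 0 if none is, and -1 for a mixed range. A PAT-style caller would
 * typically refuse the mixed case. The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_check_range(unsigned long start, unsigned long end)
{
	int is_ram = pagerange_is_ram(start, end);

	if (is_ram < 0)
		return -EINVAL;	/* mixed RAM/non-RAM: refuse the request */
	if (is_ram)
		return 0;	/* fully RAM: use the direct mapping */
	return 1;		/* fully non-RAM: safe to ioremap */
}
#endif
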
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ON(iomem_map_sanity_check(phys_addr, size));

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

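/*
 * Usage sketch (illustrative only, not part of this file): a driver maps
 * its MMIO region once, accesses it only through readl()/writel() and
 * friends, and releases it with iounmap(). The BAR address and register
 * offsets below are made up.
 */
#if 0	/* example only */
static int example_probe_device(void)
{
	void __iomem *regs;
	unsigned int ver;

	regs = ioremap_nocache(0xfebf0000, 4096);	/* hypothetical BAR */
	if (!regs)
		return -ENOMEM;

	ver = readl(regs + 0x00);	/* hypothetical version register */
	writel(1, regs + 0x04);		/* hypothetical enable register */

	iounmap(regs);
	return ver ? 0 : -ENODEV;
}
#endif
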
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

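/*
 * Usage sketch (illustrative only, not part of this file): the /dev/mem
 * read path brackets a copy with the two helpers above, so non-RAM pages
 * get a temporary ioremap_default() mapping that is torn down afterwards.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_peek_phys(unsigned long phys, u8 *out)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*out = *(u8 *)ptr;		/* read a single byte */
	unxlate_dev_mem_ptr(phys, ptr);
	return 0;
}
#endif
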
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot\n",
			 phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

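/*
 * Usage sketch (illustrative only, not part of this file): boot code that
 * must inspect a firmware table before the normal ioremap machinery is up
 * maps it with early_ioremap()/early_memremap() and must balance every
 * mapping with early_iounmap() before the leak check runs. The physical
 * address and signature below are made up.
 */
#if 0	/* example only */
static void __init example_scan_firmware_table(void)
{
	void __iomem *map = early_ioremap(0x000f0000, 64);	/* hypothetical */

	if (!map)
		return;
	if (readl(map) == 0x004d5352)	/* hypothetical signature word */
		printk(KERN_INFO "example: table found\n");
	early_iounmap(map, 64);
}
#endif
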
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}