/*
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

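/*
 * The default of PAGE_SIZE - 1 is sufficient for caches that cannot
 * alias; parts whose caches can alias are expected to widen this mask
 * during cache initialisation so that shared mappings of the same page
 * always land on the same cache colour.
 */
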
#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

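/*
 * Bottom-up allocator: pick an unmapped region for a new mapping.  For
 * file-backed or shared mappings the returned address is colour-aligned
 * against the file offset, so every mapping of a given page shares a
 * cache colour.
 */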
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

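	/*
	 * A MAP_FIXED address cannot be moved, so a shared mapping whose
	 * fixed address and file offset fall on different cache colours
	 * would alias; such requests are refused below.
	 */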
	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

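	/*
	 * Let vm_unmapped_area() search upwards from TASK_UNMAPPED_BASE.
	 * align_mask/align_offset encode the colour constraint: e.g. with
	 * 4KiB pages and a 16KiB colour size, align_mask is 0x3000 and
	 * pgoff = 5 gives align_offset = 0x5000, so the chosen address
	 * satisfies (addr & 0x3000) == 0x1000, the same colour as the
	 * backing file offset.  (Sizes here are illustrative only.)
	 */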
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

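/*
 * Top-down variant, used when the process has a top-down mmap layout:
 * search downwards from mm->mmap_base first and fall back to the
 * bottom-up allocator if that window is exhausted.
 */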
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

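	/* Search top-down in [PAGE_SIZE, mm->mmap_base), honouring colour. */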
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

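/*
 * mmap() of /dev/mem is not range-restricted here; any physical frame
 * may be mapped.
 */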
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}