/*
 * This file contains various system calls that have different calling
 * conventions on different platforms.
 *
 * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/shm.h>
#include <linux/file.h>		/* doh, must come after sched.h... */
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/highuid.h>
#include <linux/hugetlb.h>

#include <asm/shmparam.h>
#include <asm/uaccess.h>
26 arch_get_unmapped_area (struct file
*filp
, unsigned long addr
, unsigned long len
,
27 unsigned long pgoff
, unsigned long flags
)
29 long map_shared
= (flags
& MAP_SHARED
);
30 unsigned long start_addr
, align_mask
= PAGE_SIZE
- 1;
31 struct mm_struct
*mm
= current
->mm
;
32 struct vm_area_struct
*vma
;
34 if (len
> RGN_MAP_LIMIT
)
37 #ifdef CONFIG_HUGETLB_PAGE
38 if (REGION_NUMBER(addr
) == REGION_HPAGE
)
42 addr
= mm
->free_area_cache
;
44 if (map_shared
&& (TASK_SIZE
> 0xfffffffful
))
46 * For 64-bit tasks, align shared segments to 1MB to avoid potential
47 * performance penalty due to virtual aliasing (see ASDM). For 32-bit
48 * tasks, we prefer to avoid exhausting the address space too quickly by
49 * limiting alignment to a single page.
51 align_mask
= SHMLBA
- 1;
54 start_addr
= addr
= (addr
+ align_mask
) & ~align_mask
;
56 for (vma
= find_vma(mm
, addr
); ; vma
= vma
->vm_next
) {
57 /* At this point: (!vma || addr < vma->vm_end). */
58 if (TASK_SIZE
- len
< addr
|| RGN_MAP_LIMIT
- len
< REGION_OFFSET(addr
)) {
59 if (start_addr
!= TASK_UNMAPPED_BASE
) {
60 /* Start a new search --- just in case we missed some holes. */
61 addr
= TASK_UNMAPPED_BASE
;
66 if (!vma
|| addr
+ len
<= vma
->vm_start
) {
67 /* Remember the address where we stopped this search: */
68 mm
->free_area_cache
= addr
+ len
;
71 addr
= (vma
->vm_end
+ align_mask
) & ~align_mask
;
76 ia64_getpriority (int which
, int who
)
80 prio
= sys_getpriority(which
, who
);
82 force_successful_syscall_return();
88 /* XXX obsolete, but leave it here until the old libc is gone... */
89 asmlinkage
unsigned long
90 sys_getpagesize (void)
95 asmlinkage
unsigned long
96 ia64_shmat (int shmid
, void __user
*shmaddr
, int shmflg
)
101 retval
= do_shmat(shmid
, shmaddr
, shmflg
, &raddr
);
105 force_successful_syscall_return();
109 asmlinkage
unsigned long
110 ia64_brk (unsigned long brk
)
112 unsigned long rlim
, retval
, newbrk
, oldbrk
;
113 struct mm_struct
*mm
= current
->mm
;
116 * Most of this replicates the code in sys_brk() except for an additional safety
117 * check and the clearing of r8. However, we can't call sys_brk() because we need
118 * to acquire the mmap_sem before we can do the test...
120 down_write(&mm
->mmap_sem
);
122 if (brk
< mm
->end_code
)
124 newbrk
= PAGE_ALIGN(brk
);
125 oldbrk
= PAGE_ALIGN(mm
->brk
);
126 if (oldbrk
== newbrk
)
129 /* Always allow shrinking brk. */
130 if (brk
<= mm
->brk
) {
131 if (!do_munmap(mm
, newbrk
, oldbrk
-newbrk
))
136 /* Check against unimplemented/unmapped addresses: */
137 if ((newbrk
- oldbrk
) > RGN_MAP_LIMIT
|| REGION_OFFSET(newbrk
) > RGN_MAP_LIMIT
)
140 /* Check against rlimit.. */
141 rlim
= current
->signal
->rlim
[RLIMIT_DATA
].rlim_cur
;
142 if (rlim
< RLIM_INFINITY
&& brk
- mm
->start_data
> rlim
)
145 /* Check against existing mmap mappings. */
146 if (find_vma_intersection(mm
, oldbrk
, newbrk
+PAGE_SIZE
))
149 /* Ok, looks good - let it rip. */
150 if (do_brk(oldbrk
, newbrk
-oldbrk
) != oldbrk
)
156 up_write(&mm
->mmap_sem
);
157 force_successful_syscall_return();
162 * On IA-64, we return the two file descriptors in ret0 and ret1 (r8
163 * and r9) as this is faster than doing a copy_to_user().
168 struct pt_regs
*regs
= ia64_task_regs(current
);
172 retval
= do_pipe(fd
);
181 static inline unsigned long
182 do_mmap2 (unsigned long addr
, unsigned long len
, int prot
, int flags
, int fd
, unsigned long pgoff
)
185 struct file
*file
= NULL
;
187 flags
&= ~(MAP_EXECUTABLE
| MAP_DENYWRITE
);
188 if (!(flags
& MAP_ANONYMOUS
)) {
193 if (!file
->f_op
|| !file
->f_op
->mmap
) {
200 * A zero mmap always succeeds in Linux, independent of whether or not the
201 * remaining arguments are valid.
206 /* Careful about overflows.. */
207 len
= PAGE_ALIGN(len
);
208 if (!len
|| len
> TASK_SIZE
) {
214 * Don't permit mappings into unmapped space, the virtual page table of a region,
215 * or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
216 * (for some integer n <= 61) and len > 0.
218 roff
= REGION_OFFSET(addr
);
219 if ((len
> RGN_MAP_LIMIT
) || (roff
> (RGN_MAP_LIMIT
- len
))) {
224 down_write(¤t
->mm
->mmap_sem
);
225 addr
= do_mmap_pgoff(file
, addr
, len
, prot
, flags
, pgoff
);
226 up_write(¤t
->mm
->mmap_sem
);
234 * mmap2() is like mmap() except that the offset is expressed in units
235 * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
236 * of) files that are larger than the address space of the CPU.
238 asmlinkage
unsigned long
239 sys_mmap2 (unsigned long addr
, unsigned long len
, int prot
, int flags
, int fd
, long pgoff
)
241 addr
= do_mmap2(addr
, len
, prot
, flags
, fd
, pgoff
);
242 if (!IS_ERR((void *) addr
))
243 force_successful_syscall_return();
247 asmlinkage
unsigned long
248 sys_mmap (unsigned long addr
, unsigned long len
, int prot
, int flags
, int fd
, long off
)
250 if (offset_in_page(off
) != 0)
253 addr
= do_mmap2(addr
, len
, prot
, flags
, fd
, off
>> PAGE_SHIFT
);
254 if (!IS_ERR((void *) addr
))
255 force_successful_syscall_return();
259 asmlinkage
unsigned long
260 ia64_mremap (unsigned long addr
, unsigned long old_len
, unsigned long new_len
, unsigned long flags
,
261 unsigned long new_addr
)
263 extern unsigned long do_mremap (unsigned long addr
,
264 unsigned long old_len
,
265 unsigned long new_len
,
267 unsigned long new_addr
);
269 down_write(¤t
->mm
->mmap_sem
);
271 addr
= do_mremap(addr
, old_len
, new_len
, flags
, new_addr
);
273 up_write(¤t
->mm
->mmap_sem
);
275 if (IS_ERR((void *) addr
))
278 force_successful_syscall_return();
285 sys_pciconfig_read (unsigned long bus
, unsigned long dfn
, unsigned long off
, unsigned long len
,
292 sys_pciconfig_write (unsigned long bus
, unsigned long dfn
, unsigned long off
, unsigned long len
,
298 #endif /* CONFIG_PCI */