/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/linkage.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	addr = PAGE_ALIGN(addr);

	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = vma->vm_end;
	}
}
#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
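/* Worked example (assuming parisc's 4MB SHMLBA from asm/shmparam.h):
 * DCACHE_ALIGN(0x00512345) == 0x00800000, i.e. the address is rounded up
 * to the next 4MB boundary before the colour offset is added back in. */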
/* We need to know the offset to use.  Old scheme was to look for
 * existing mapping and use the same offset.  New scheme is to use the
 * address of the kernel data structure as the seed for the offset.
 * We'll see how that works...
 *
 * The mapping is cacheline aligned, so there's no information in the bottom
 * few bits of the address.  We're looking for 10 bits (4MB / 4k), so let's
 * drop the bottom 8 bits and use bits 8-17.
 */
static int get_offset(struct address_space *mapping)
{
	int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
	return offset & 0x3FF000;
}
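/* Worked example of the scheme above, assuming 4kB pages (PAGE_SHIFT == 12):
 * a struct address_space at, say, 0x12345678 gives
 *	(0x12345678 << 4) & 0x3FF000 == 0x56000,
 * i.e. bits 8-17 of the pointer become a page-aligned offset inside the
 * 4MB (1024-page) SHMLBA window. */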
static unsigned long get_shared_area(struct address_space *mapping,
		unsigned long addr, unsigned long len, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	int offset = mapping ? get_offset(mapping) : 0;

	addr = DCACHE_ALIGN(addr - offset) + offset;

	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
		if (addr < vma->vm_end) /* handle wraparound */
			return -ENOMEM;
	}
}
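/* The idea: every mapping of a given file (same address_space) is placed at
 * the same offset within its 4MB window, so user aliases of a page share a
 * cache colour -- which is what PA-RISC's virtually indexed caches need to
 * stay coherent without extra flushing. */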
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	if (len > TASK_SIZE)
		return -ENOMEM;
	/* Might want to check for cache aliasing issues for MAP_FIXED case
	 * like ARM or MIPS ??? --BenH.
	 */
	if (flags & MAP_FIXED)
		return addr;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (filp) {
		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
	} else if(flags & MAP_SHARED) {
		addr = get_shared_area(NULL, addr, len, pgoff);
	} else {
		addr = get_unshared_area(addr, len);
	}
	return addr;
}
static unsigned long do_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	struct file * file = NULL;
	unsigned long error = -EBADF;
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file != NULL)
		fput(file);
out:
	return error;
}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
}
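/* Example, assuming a 64kB-page kernel (PAGE_SHIFT == 16): userspace passes
 * pgoff in fixed 4kB units, so a pgoff of 0x20 (128kB into the file) becomes
 * 0x20 >> (16 - 12) == 0x2, i.e. hardware page 2 of the file. */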
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}
/* Fucking broken ABI */
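/* The wrappers below exist because the 32-bit parisc ABI passes each 64-bit
 * file offset/length as a pair of 32-bit registers; each wrapper glues the
 * halves back together as (loff_t)high << 32 | low before calling the
 * generic syscall. */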
#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate(fd, (long)high << 32 | low);
}
/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif
asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
			size_t count)
{
	return sys_readahead(fd, (loff_t)high << 32 | low, count);
}
asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}
asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr,
			unsigned long len, int prot, int flag)
{
	return -ENOMEM;
}

asmlinkage int sys_free_hugepages(unsigned long addr)
{
	return -EINVAL;
}
long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality == PER_LINUX)
		personality = PER_LINUX32;

	err = sys_personality(personality);
	if (err == PER_LINUX32)
		err = PER_LINUX;

	return err;
}
long parisc_newuname(struct new_utsname __user *name)
{
	int err = sys_newuname(name);

	/* 32-bit processes should see "parisc" rather than "parisc64",
	 * so chop the trailing "64" off utsname.machine for them. */
	if (!err && personality(current->personality) == PER_LINUX32) {
		if (__put_user(0, name->machine + 6) ||
		    __put_user(0, name->machine + 7))
			err = -EFAULT;
	}

	return err;
}