/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
11 #include <linux/errno.h>
12 #include <linux/sched.h>
14 #include <linux/smp.h>
15 #include <linux/smp_lock.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/syscalls.h>
21 #include <linux/mman.h>
22 #include <linux/file.h>
23 #include <linux/utsname.h>
24 #include <asm/cacheflush.h>
25 #include <asm/uaccess.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
32 asmlinkage
int sys_pipe(unsigned long r4
, unsigned long r5
,
33 unsigned long r6
, unsigned long r7
,
#if defined(HAVE_ARCH_UNMAPPED_AREA) && defined(CONFIG_MMU)
/*
 * To avoid cache aliasing, we map the shared page with the same colour:
 * round addresses up to an SHMLBA boundary so virtual aliases of a
 * shared mapping index the same cache lines.
 */
#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))

/*
 * Find a free VM area of @len bytes for a new mapping.
 *
 * MAP_FIXED requests are only sanity-checked: a shared fixed mapping
 * that is not SHMLBA-aligned would violate the cache-aliasing
 * constraint and is rejected with -EINVAL.  Otherwise we search from
 * the hinted address (if any), falling back to a first-fit walk of the
 * VMA list starting at mm->free_area_cache, with the cached-hole-size
 * heuristic to skip known-too-small gaps.
 *
 * Private mappings only need page alignment; shared mappings are
 * colour-aligned with COLOUR_ALIGN().
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	/* Try the caller's hint first. */
	if (addr) {
		if (flags & MAP_PRIVATE)
			addr = PAGE_ALIGN(addr);
		else
			addr = COLOUR_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* Cached hole is too small for this request: restart the scan. */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = TASK_UNMAPPED_BASE;
	}

	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(mm->free_area_cache);
	else
		addr = COLOUR_ALIGN(mm->free_area_cache);
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}
#endif /* CONFIG_MMU */
125 do_mmap2(unsigned long addr
, unsigned long len
, unsigned long prot
,
126 unsigned long flags
, int fd
, unsigned long pgoff
)
129 struct file
*file
= NULL
;
131 flags
&= ~(MAP_EXECUTABLE
| MAP_DENYWRITE
);
132 if (!(flags
& MAP_ANONYMOUS
)) {
138 down_write(¤t
->mm
->mmap_sem
);
139 error
= do_mmap_pgoff(file
, addr
, len
, prot
, flags
, pgoff
);
140 up_write(¤t
->mm
->mmap_sem
);
148 asmlinkage
int old_mmap(unsigned long addr
, unsigned long len
,
149 unsigned long prot
, unsigned long flags
,
150 int fd
, unsigned long off
)
152 if (off
& ~PAGE_MASK
)
154 return do_mmap2(addr
, len
, prot
, flags
, fd
, off
>>PAGE_SHIFT
);
157 asmlinkage
long sys_mmap2(unsigned long addr
, unsigned long len
,
158 unsigned long prot
, unsigned long flags
,
159 unsigned long fd
, unsigned long pgoff
)
161 return do_mmap2(addr
, len
, prot
, flags
, fd
, pgoff
);
165 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
167 * This is really horribly ugly.
169 asmlinkage
int sys_ipc(uint call
, int first
, int second
,
170 int third
, void __user
*ptr
, long fifth
)
174 version
= call
>> 16; /* hack for backward compatibility */
180 return sys_semtimedop(first
, (struct sembuf __user
*)ptr
,
183 return sys_semtimedop(first
, (struct sembuf __user
*)ptr
,
185 (const struct timespec __user
*)fifth
);
187 return sys_semget (first
, second
, third
);
192 if (get_user(fourth
.__pad
, (void * __user
*) ptr
))
194 return sys_semctl (first
, second
, third
, fourth
);
203 return sys_msgsnd (first
, (struct msgbuf __user
*) ptr
,
208 struct ipc_kludge tmp
;
212 if (copy_from_user(&tmp
,
213 (struct ipc_kludge __user
*) ptr
,
216 return sys_msgrcv (first
, tmp
.msgp
, second
,
220 return sys_msgrcv (first
,
221 (struct msgbuf __user
*) ptr
,
222 second
, fifth
, third
);
225 return sys_msgget ((key_t
) first
, second
);
227 return sys_msgctl (first
, second
,
228 (struct msqid_ds __user
*) ptr
);
238 ret
= do_shmat (first
, (char __user
*) ptr
,
242 return put_user (raddr
, (ulong __user
*) third
);
244 case 1: /* iBCS2 emulator entry point */
245 if (!segment_eq(get_fs(), get_ds()))
247 return do_shmat (first
, (char __user
*) ptr
,
248 second
, (ulong
*) third
);
251 return sys_shmdt ((char __user
*)ptr
);
253 return sys_shmget (first
, second
, third
);
255 return sys_shmctl (first
, second
,
256 (struct shmid_ds __user
*) ptr
);
264 asmlinkage
int sys_uname(struct old_utsname
* name
)
270 err
=copy_to_user(name
, &system_utsname
, sizeof (*name
));
272 return err
?-EFAULT
:0;
275 asmlinkage ssize_t
sys_pread_wrapper(unsigned int fd
, char * buf
,
276 size_t count
, long dummy
, loff_t pos
)
278 return sys_pread64(fd
, buf
, count
, pos
);
281 asmlinkage ssize_t
sys_pwrite_wrapper(unsigned int fd
, const char * buf
,
282 size_t count
, long dummy
, loff_t pos
)
284 return sys_pwrite64(fd
, buf
, count
, pos
);
287 asmlinkage
int sys_fadvise64_64_wrapper(int fd
, u32 offset0
, u32 offset1
,
288 u32 len0
, u32 len1
, int advice
)
290 #ifdef __LITTLE_ENDIAN__
291 return sys_fadvise64_64(fd
, (u64
)offset1
<< 32 | offset0
,
292 (u64
)len1
<< 32 | len0
, advice
);
294 return sys_fadvise64_64(fd
, (u64
)offset0
<< 32 | offset1
,
295 (u64
)len0
<< 32 | len1
, advice
);