/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
29 * sys_pipe() is the normal C calling standard for creating
30 * a pipe. It's not the way Unix traditionally does this, though.
32 asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
33 unsigned long r6, unsigned long r7,
34 struct pt_regs regs)
36 int fd[2];
37 int error;
39 error = do_pipe(fd);
40 if (!error) {
41 regs.regs[1] = fd[1];
42 return fd[0];
44 return error;
#if defined(HAVE_ARCH_UNMAPPED_AREA) && defined(CONFIG_MMU)
/*
 * To avoid cache aliasing, we map the shared page with the same colour:
 * shared mappings are aligned to an SHMLBA boundary so that virtual
 * addresses mapping the same physical page land in the same cache set.
 */
#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))

/*
 * Find an unmapped virtual address range of @len bytes for a new mapping.
 *
 * Private mappings need only page alignment; everything else is rounded
 * up to a cache colour (SHMLBA) boundary.  Uses mm->free_area_cache /
 * mm->cached_hole_size to avoid rescanning the VMA list from the bottom
 * on every call.
 *
 * Returns the chosen address, or -EINVAL / -ENOMEM on failure.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* Try the caller's hint first, suitably aligned. */
	if (addr) {
		if (flags & MAP_PRIVATE)
			addr = PAGE_ALIGN(addr);
		else
			addr = COLOUR_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/*
	 * If the cached largest-hole is too small for this request,
	 * the cache is useless: restart the scan from the bottom.
	 */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = TASK_UNMAPPED_BASE;
	}
	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(mm->free_area_cache);
	else
		addr = COLOUR_ALIGN(mm->free_area_cache);
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Track the largest hole seen so far for the next caller. */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}
#endif /* HAVE_ARCH_UNMAPPED_AREA && CONFIG_MMU */
124 static inline long
125 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
126 unsigned long flags, int fd, unsigned long pgoff)
128 int error = -EBADF;
129 struct file *file = NULL;
131 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
132 if (!(flags & MAP_ANONYMOUS)) {
133 file = fget(fd);
134 if (!file)
135 goto out;
138 down_write(&current->mm->mmap_sem);
139 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
140 up_write(&current->mm->mmap_sem);
142 if (file)
143 fput(file);
144 out:
145 return error;
148 asmlinkage int old_mmap(unsigned long addr, unsigned long len,
149 unsigned long prot, unsigned long flags,
150 int fd, unsigned long off)
152 if (off & ~PAGE_MASK)
153 return -EINVAL;
154 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
157 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
158 unsigned long prot, unsigned long flags,
159 unsigned long fd, unsigned long pgoff)
161 return do_mmap2(addr, len, prot, flags, fd, pgoff);
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * @call encodes both the operation (low 16 bits) and an interface
 * version (high 16 bits); the remaining arguments are reinterpreted
 * per operation.  Returns the result of the dispatched IPC call, or
 * -EINVAL for an unknown @call.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	/* Semaphore operations. */
	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			/* Plain semop is semtimedop with no timeout. */
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			/* Userspace passes a pointer to the semun union. */
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	/* Message-queue operations. */
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old ABI: msgp and msgtyp bundled in a kludge struct. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
					(struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	/* Shared-memory operations. */
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				/* Attach address is returned via *third. */
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				/* Only valid when already operating on kernel fs. */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
264 asmlinkage int sys_uname(struct old_utsname * name)
266 int err;
267 if (!name)
268 return -EFAULT;
269 down_read(&uts_sem);
270 err=copy_to_user(name, &system_utsname, sizeof (*name));
271 up_read(&uts_sem);
272 return err?-EFAULT:0;
275 asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
276 size_t count, long dummy, loff_t pos)
278 return sys_pread64(fd, buf, count, pos);
281 asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
282 size_t count, long dummy, loff_t pos)
284 return sys_pwrite64(fd, buf, count, pos);
/*
 * fadvise64_64(2) wrapper: the 64-bit offset and length each arrive
 * split across two 32-bit registers; reassemble them in the order the
 * ABI delivers them, which differs by endianness.
 */
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
					u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
	/* Low word first on little-endian. */
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0, advice);
#else
	/* High word first on big-endian. */
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1, advice);
#endif
}