/*
 * linux/arch/x86_64/kernel/sys_x86_64.c
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage long sys_pipe(int __user *fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
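/*
 * Illustrative userspace view of the call above (not part of this
 * file): the libc pipe() wrapper passes a two-int array, and the
 * kernel fills it with the read end in fd[0] and the write end in
 * fd[1]:
 *
 *	int fd[2];
 *	if (pipe(fd) == 0) {
 *		write(fd[1], "hi", 2);	// write end
 *		// read(fd[0], ...) then sees the two bytes
 *	}
 */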
asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
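/*
 * Note on the offset handling above (illustrative, assuming 4KB
 * pages, i.e. PAGE_SHIFT == 12): the -EINVAL check rejects any file
 * offset that is not page aligned, and the offset reaches
 * do_mmap_pgoff() in pages rather than bytes:
 *
 *	off = 0x3000;		// page aligned: off & ~PAGE_MASK == 0
 *	off >> PAGE_SHIFT == 3;	// page index handed down
 *	off = 0x3001;		// low bits set -> returns -EINVAL
 */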
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		/* This is usually needed to map code in the small
		   model, so it needs to be in the first 31 bits.
		   Limit it to that.  This means we need to move the
		   unmapped base down for this case.  This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap.  Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
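/*
 * Worked example (illustrative): a 64-bit task that passes MAP_32BIT
 * gets the window [0x40000000, 0x80000000), i.e. the 1GB directly
 * below 2GB, so every returned address fits in 31 bits:
 *
 *	mmap(NULL, len, PROT_READ|PROT_EXEC,
 *	     MAP_PRIVATE|MAP_32BIT, fd, 0);	// lands in [1GB, 2GB)
 */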
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
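/*
 * Search sketch (illustrative): the loop above is a first-fit walk of
 * the VMA list starting at mm->free_area_cache.  A gap is a hit when
 * the request ends before the next mapping starts, i.e.
 * addr + len <= vma->vm_start:
 *
 *	addr      addr+len          vma->vm_start
 *	 |<- len ->|......free.........|/// mapped ///|
 *
 * On a miss addr jumps to vma->vm_end, and cached_hole_size tracks
 * the largest hole skipped so a later, smaller request can restart
 * the walk from begin instead of from the cache.
 */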
asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;

	down_read(&uts_sem);
	err = copy_to_user(name, &system_utsname, sizeof(*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
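/*
 * Illustrative: under the PER_LINUX32 personality (set e.g. by the
 * linux32(1) wrapper), the machine field reported above is rewritten
 * so 32-bit userland sees a 32-bit CPU:
 *
 *	struct utsname u;
 *	uname(&u);	// u.machine is "i686", not "x86_64"
 */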