arch/x86/kernel/sys_x86_64.c
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags,
		unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
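/*
 * Editorial note (not part of the original file): on x86-64 the C
 * library's mmap() reaches this syscall directly, and 'off' is a byte
 * offset, so the PAGE_MASK check above rejects unaligned offsets with
 * -EINVAL.  A hypothetical raw invocation from userspace:
 *
 *	void *p = (void *)syscall(__NR_mmap, NULL, 4096,
 *				  PROT_READ | PROT_WRITE,
 *				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * MAP_ANONYMOUS skips the fget()/fput() pair above, since no file backs
 * the mapping.
 */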
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually needed to map code in the small
		   model, so it has to be in the first 31 bits. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
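/*
 * Editorial worked example (assumed values): a 64-bit task passing
 * MAP_32BIT gets the window [0x40000000, 0x80000000), i.e. the 1GB just
 * below the 2GB boundary where sign-extended 32-bit pointers stay
 * positive.  With PF_RANDOMIZE, randomize_range() may shift 'begin' up
 * by as much as 32MB (0x02000000), e.g. to 0x41a3f000, while 'end'
 * stays fixed at 0x80000000.
 */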
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
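/*
 * Editorial sketch of the bottom-up search above: begin at
 * free_area_cache and walk the VMA list first-fit.  cached_hole_size
 * remembers the largest hole skipped since the cache was last reset;
 * when a later, smaller request (here only for 32-bit searches) could
 * have fit in such a skipped hole, the cache is dropped back to
 * 'begin' so the hole is not missed.  If the walk runs past 'end', it
 * restarts once from 'begin' before giving up with -ENOMEM.
 */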
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base - len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
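/*
 * Editorial sketch of the top-down walk above: candidates start just
 * below mmap_base and move downwards, each retry placed at
 * vma->vm_start - len.  The loop guard (len < vma->vm_start) stops the
 * descent before the next candidate would wrap below address zero; the
 * bottomup: path then redoes the allocation bottom-up and restores the
 * top-down cache state, since a failed mmap() would likely kill the
 * application.
 */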
asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;

	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
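/*
 * Editorial note: the PER_LINUX32 override above is why a 64-bit
 * kernel reports an i686 machine string to tasks running with a 32-bit
 * personality, e.g. (assumed shell session):
 *
 *	$ linux32 uname -m
 *	i686
 */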