/*
 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "linux/errno.h"
#include "linux/spinlock.h"
#include "asm/uaccess.h"
#include "asm/smp.h"
#include "asm/ldt.h"
#include "asm/unistd.h"
#include "choose-mode.h"
#include "kern.h"
#include "mode_kern.h"
#include "os.h"
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
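/*
 * modify_ldt() above is UML's wrapper around the *host* modify_ldt syscall
 * (presumably provided by the host-OS support layer), used to read and
 * write the LDT of the host processes that back UML address spaces. A
 * rough note on the structure of this file: UML builds in two modes, tt
 * (tracing thread) and skas (separate kernel address space); each
 * contributes its own do_modify_ldt_*() below, and sys_modify_ldt() at
 * the bottom picks one via CHOOSE_MODE_PROC().
 */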
#ifdef CONFIG_MODE_TT
static long do_modify_ldt_tt(int func, void __user *ptr,
			     unsigned long bytecount)
{
	struct user_desc info;
	int res = 0;
	void *buf = NULL;
	void *p = NULL; /* What we pass to the host. */

	switch(func){
	case 1:
	case 0x11: /* write_ldt */
		/* Do this check now to avoid overflows. */
		if (bytecount != sizeof(struct user_desc)) {
			res = -EINVAL;
			goto out;
		}

		if(copy_from_user(&info, ptr, sizeof(info))) {
			res = -EFAULT;
			goto out;
		}

		p = &info;
		break;
	case 0:
	case 2: /* read_ldt */
		/* The use of info avoids kmalloc on the write case, not on
		 * the read one. */
		buf = kmalloc(bytecount, GFP_KERNEL);
		if (!buf) {
			res = -ENOMEM;
			goto out;
		}
		p = buf;
		break;
	default:
		res = -ENOSYS;
		goto out;
	}

	res = modify_ldt(func, p, bytecount);
	if(res < 0)
		goto out;

	switch(func){
	case 0:
	case 2:
		/* modify_ldt was for reading and returned the number of
		 * bytes read. */
		if(copy_to_user(ptr, p, res))
			res = -EFAULT;
		break;
	}

out:
	kfree(buf);
	return res;
}

#endif
#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#include "skas_ptrace.h"
#include "asm/mmu_context.h"
#include "proc_mm.h"
long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
		     void **addr, int done)
{
	long res;

	if(proc_mm){
		/* This is special handling for the case where the mm to
		 * modify isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 *     (current->active_mm->context.skas.u == mm_idp)
		 * will be true, so no call to switch_mm_skas(mm_idp) is done.
		 * If this is called from init_new_ldt or PTRACE_LDT, mm_idp
		 * won't belong to current->active_mm, but to child->mm, so
		 * we need to switch the child's mm into our userspace, then
		 * switch back later.
		 *
		 * Note: I'm unsure whether interrupts should be disabled
		 * here.
		 */
		if(!current->active_mm || current->active_mm == &init_mm ||
		   mm_idp != &current->active_mm->context.skas.id)
			switch_mm_skas(mm_idp);
	}

	if(ptrace_ldt) {
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if(!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if(proc_mm)
			put_cpu();
	}
	else {
		void *stub_addr;
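		/*
		 * The data size handed to syscall_stub_data() below rounds
		 * sizeof(*desc) up to the next multiple of sizeof(long):
		 *     (sizeof(*desc) + sizeof(long) - 1) & ~(sizeof(long) - 1)
		 * presumably because the stub copies the payload into the
		 * child in long-sized words.
		 */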
		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
						~(sizeof(long) - 1),
					addr, &stub_addr);
		if(!res){
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if(proc_mm){
		/* This is the second part of the special handling that makes
		 * PTRACE_LDT possible to implement. */
		if(current->active_mm && current->active_mm != &init_mm &&
		   mm_idp != &current->active_mm->context.skas.id)
			switch_mm_skas(&current->active_mm->context.skas.id);
	}

	return res;
}
static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
			.func = 0,
			.bytecount = bytecount,
			.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if(ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/* This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right number. */
	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if(res < 0)
		goto out;

	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if(n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);

	return res;
}
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */
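/*
 * A rough sketch of the uml_ldt_t layout these routines assume: the first
 * LDT_DIRECT_ENTRIES descriptors are stored inline in ldt->u.entries[];
 * once the table grows past that, storage switches to whole pages,
 * ldt->u.pages[i], each holding LDT_ENTRIES_PER_PAGE struct ldt_entry
 * descriptors (see the growth logic in write_ldt() below).
 */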
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t * ldt = &current->mm->context.skas.ldt;

	if(!ldt->entry_count)
		goto out;
	if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if(ptrace_ldt){
		return read_ldt_from_host(ptr, bytecount);
	}

	down(&ldt->semaphore);
	if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if(size > bytecount)
			size = bytecount;
		if(copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		    i++){
			size = PAGE_SIZE;
			if(size > bytecount)
				size = bytecount;
			if(copy_to_user(ptr, ldt->u.pages[i], size)){
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	up(&ldt->semaphore);

	if(bytecount == 0 || err == -EFAULT)
		goto out;

	/* The rest of the user buffer is zero-filled, as on i386. */
	if(clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;

	if(bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/* UML doesn't support lcall7 and lcall27, so we don't really have a
	 * default ldt; instead we emulate an empty ldt of the common host
	 * default ldt size. */
	if(clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t * ldt = &current->mm->context.skas.ldt;
	struct mm_id * mm_idp = &current->mm->context.skas.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if(bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if(ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if(ldt_info.contents == 3){
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if(!ptrace_ldt)
		down(&ldt->semaphore);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if(err)
		goto out_unlock;
	else if(ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

	/* Grow the local copy from direct entries to pages if the new entry
	 * lands beyond the space allocated so far. */
	if(ldt_info.entry_number >= ldt->entry_count &&
	   ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
		for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		    i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		    i++){
			if(i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if(!ldt->u.pages[i]){
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if(i == 0) {
				/* pages[0] overlays the direct entries (u is
				 * a union), so restore entry 0 saved above,
				 * then the remaining direct entries. */
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if(ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))){
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else{
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	/* The semaphore is only taken when PTRACE_LDT isn't in use. */
	if(!ptrace_ldt)
		up(&ldt->semaphore);
out:
	return err;
}
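/*
 * The func codes follow the i386 modify_ldt convention:
 *   0    - read the LDT
 *   1    - write an LDT entry (legacy calling convention)
 *   2    - read the default LDT
 *   0x11 - write an LDT entry (newer calling convention)
 */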
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;
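/*
 * host_ldt_entries points to a -1 terminated list of host LDT slots that
 * are in use; init_new_ldt() clears each of them in a fresh mm. dummy_list
 * doubles as static storage for short lists: while probing is in progress
 * it is exposed as the empty list (dummy_list+1), and when eight or fewer
 * entries are found the list is built in place here instead of kmalloc'd.
 */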
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if(host_ldt_entries != NULL){
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	/* order = ceil(log2(LDT_PAGES_MAX)), i.e. enough pages to hold the
	 * whole host LDT. */
	for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if(ldt == NULL) {
		printk("ldt_get_host_info: couldn't allocate buffer for host "
		       "ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if(ret < 0) {
		printk("ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if(ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	/* Count the host LDT entries that are actually in use. */
	for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
		if(ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if(size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if(tmp == NULL) {
			printk("ldt_get_host_info: couldn't allocate host ldt "
			       "list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
		if(ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
long init_new_ldt(struct mmu_context_skas * new_mm,
		  struct mmu_context_skas * from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	struct proc_mm_op copy;

	if(!ptrace_ldt)
		init_MUTEX(&new_mm->ldt.semaphore);

	if(!from_mm){
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if(proc_mm) {
			/*
			 * If the new mm was created using proc_mm, the
			 * host's default ldt is currently assigned, which
			 * normally contains the call gates for lcall7 and
			 * lcall27. To remove these gates, we simply write
			 * an empty entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, 1);
		}
		else{
			/*
			 * Now we try to retrieve info about the ldt we
			 * inherited from the host. All ldt entries found
			 * will be reset in the following loop.
			 */
			ldt_get_host_info();
			for(num_p=host_ldt_entries; *num_p != -1; num_p++){
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if(err)
					break;
			}
		}
		new_mm->ldt.entry_count = 0;

		goto out;
	}

	if(proc_mm){
		/* We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because using proc_mm a new mm with an
		 * empty/default LDT was created in new_mm().
		 */
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
						{ .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if(i != sizeof(copy))
			printk("new_mm : /proc/mm copy_segments failed, "
			       "err = %d\n", -i);
	}

	if(!ptrace_ldt) {
		/* Our local LDT is used to supply the data for
		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
		 * i.e. we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		down(&from_mm->ldt.semaphore);
		if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
			memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
			       sizeof(new_mm->ldt.u.entries));
		}
		else{
			i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while(i-- > 0){
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page){
					err = -ENOMEM;
					break;
				}
				new_mm->ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->ldt.u.pages[i],
				       from_mm->ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->ldt.entry_count = from_mm->ldt.entry_count;
		up(&from_mm->ldt.semaphore);
	}

out:
	return err;
}
void free_ldt(struct mmu_context_skas * mm)
{
	int i;

	if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
		i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while(i-- > 0)
			free_page((long) mm->ldt.u.pages[i]);
	}
	mm->ldt.entry_count = 0;
}

#endif
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
				ptr, bytecount);
}
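/*
 * Usage sketch (hypothetical guest code, not part of this file): a process
 * running inside UML installs an LDT entry with the modify_ldt syscall,
 * which ends up in sys_modify_ldt() above.
 *
 *	struct user_desc d = {
 *		.entry_number   = 0,
 *		.base_addr      = (unsigned long) base,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *	};
 *	syscall(__NR_modify_ldt, 1, &d, sizeof(d));	(func 1 == write_ldt)
 */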