/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unistd.h>
#include <os.h>
#include <proc_mm.h>
#include <skas.h>
#include <skas_ptrace.h>
#include <sysdep/tls.h>
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
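/*
 * write_ldt_entry() below pushes a single descriptor into the host
 * process backing mm_idp, using whichever mechanism the host offers:
 * one PTRACE_LDT call carrying a struct ptrace_ldt, or the generic
 * syscall stub, which stages the descriptor in the stub data page and
 * runs modify_ldt() inside the child.
 */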
static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)
{
	long res;

	if (proc_mm) {
		/*
		 * Special handling for the case where the mm to modify
		 * isn't current->active_mm.
		 * If this is called directly by modify_ldt,
		 *     (current->active_mm->context.skas.u == mm_idp)
		 * will be true, so no call to __switch_mm(mm_idp) is done.
		 * If this is called from init_new_ldt or PTRACE_LDT,
		 * mm_idp won't belong to current->active_mm, but to
		 * child->mm, so we need to switch the child's mm into our
		 * userspace and switch back afterwards.
		 *
		 * Note: it is unclear whether interrupts should be
		 * disabled here.
		 */
		if (!current->active_mm || current->active_mm == &init_mm ||
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(mm_idp);
	}

	if (ptrace_ldt) {
		struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
			.func = func,
			.ptr = desc,
			.bytecount = sizeof(*desc)};
		u32 cpu;
		int pid;

		if (!proc_mm)
			pid = mm_idp->u.pid;
		else {
			cpu = get_cpu();
			pid = userspace_pid[cpu];
		}

		res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

		if (proc_mm)
			put_cpu();
	}
	else {
		void *stub_addr;

		res = syscall_stub_data(mm_idp, (unsigned long *)desc,
					(sizeof(*desc) + sizeof(long) - 1) &
					    ~(sizeof(long) - 1),
					addr, &stub_addr);
		if (!res) {
			unsigned long args[] = { func,
						 (unsigned long)stub_addr,
						 sizeof(*desc),
						 0, 0, 0 };
			res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
					       0, addr, done);
		}
	}

	if (proc_mm) {
		/*
		 * This is the second part of the special handling that
		 * makes PTRACE_LDT possible to implement.
		 */
		if (current->active_mm && current->active_mm != &init_mm &&
		    mm_idp != &current->active_mm->context.id)
			__switch_mm(&current->active_mm->context.id);
	}

	return res;
}
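/*
 * read_ldt_from_host() services the read side when PTRACE_LDT is
 * available: the host ldt is pulled into a kernel buffer with a single
 * ptrace call and then copied out to userspace.
 */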
static long read_ldt_from_host(void __user *ptr, unsigned long bytecount)
{
	int res, n;
	struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
			.func = 0,
			.bytecount = bytecount,
			.ptr = kmalloc(bytecount, GFP_KERNEL)};
	u32 cpu;

	if (ptrace_ldt.ptr == NULL)
		return -ENOMEM;

	/*
	 * This is called from sys_modify_ldt only, so userspace_pid gives
	 * us the right number.
	 */
	cpu = get_cpu();
	res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
	put_cpu();
	if (res < 0)
		goto out;

	n = copy_to_user(ptr, ptrace_ldt.ptr, res);
	if (n != 0)
		res = -EFAULT;

out:
	kfree(ptrace_ldt.ptr);
	return res;
}
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly borrowed from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */
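/*
 * For orientation, a sketch of the per-mm ldt bookkeeping used below,
 * reconstructed from how this file uses it (the authoritative
 * definition lives in the UML mm_context headers):
 *
 *	typedef struct uml_ldt {
 *		int entry_count;
 *		struct mutex lock;
 *		union {
 *			struct ldt_entry entries[LDT_DIRECT_ENTRIES];
 *			struct ldt_entry *pages[LDT_PAGES_MAX];
 *		} u;
 *	} uml_ldt_t;
 *
 * Small ldts live inline in u.entries; once an entry number at or
 * above LDT_DIRECT_ENTRIES is written, storage switches to whole pages
 * in u.pages.
 */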
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;

	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;

	if (ptrace_ldt)
		return read_ldt_from_host(ptr, bytecount);

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)
		goto out;

	if (clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/*
	 * UML doesn't support lcall7 and lcall27, so there is no real
	 * default ldt here; emulate an empty ldt of the common host
	 * default ldt size instead.
	 */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}
static int write_ldt(void __user *ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id *mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if (!ptrace_ldt)
		mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;
	else if (ptrace_ldt) {
		/* With PTRACE_LDT available, this is used as a flag only */
		ldt->entry_count = 1;
		goto out;
	}

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info,
						&addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	    (func == 1 || LDT_empty(&ldt_info))) {
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else {
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}
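/*
 * Dispatch on the modify_ldt func argument, mirroring the i386
 * syscall: 0 reads the ldt, 1 writes an entry in the legacy format
 * (write_ldt() then forces 'useable' to 0), 0x11 writes an entry in
 * the new format, and 2 reads the default ldt.
 */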
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}
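/*
 * host_ldt_entries points to a -1 terminated list of host ldt slots
 * that are in use and must be cleared for a new mm (see
 * init_new_ldt()). dummy_list doubles as static storage for small
 * lists: dummy_list+1 is the provisional empty list installed while
 * ldt_get_host_info() probes the host, and dummy_list itself
 * ({0, -1}) means only entry 0 needs clearing.
 */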
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short *host_ldt_entries = NULL;
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry *ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	for (i = LDT_PAGES_MAX-1, order = 0; i; i >>= 1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
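/*
 * init_new_ldt() below handles three cases: a fresh mm with no parent
 * (clear whatever ldt the new host process inherited), a proc_mm fork
 * (ask the host to copy the segments via MM_COPY_SEGMENTS), and, when
 * PTRACE_LDT isn't in use, duplicating the parent's shadow ldt so that
 * later reads can be answered locally.
 */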
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short *num_p;
	int i;
	long page, err = 0;
	void *addr = NULL;
	struct proc_mm_op copy;

	if (!ptrace_ldt)
		mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * We have to initialize a clean ldt.
		 */
		if (proc_mm) {
			/*
			 * If the new mm was created using proc_mm, the host's
			 * default ldt is currently assigned, which normally
			 * contains the call-gates for lcall7 and lcall27.
			 * To remove these gates, we simply write an empty
			 * entry as number 0 to the host.
			 */
			err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
		}
		else {
			/*
			 * Now we try to retrieve info about the ldt we
			 * inherited from the host. All ldt entries found
			 * will be reset in the following loop.
			 */
			ldt_get_host_info();
			for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
				desc.entry_number = *num_p;
				err = write_ldt_entry(&new_mm->id, 1, &desc,
						      &addr, *(num_p + 1) == -1);
				if (err)
					break;
			}
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	if (proc_mm) {
		/*
		 * We have a valid from_mm, so we now have to copy the LDT of
		 * from_mm to new_mm, because using proc_mm a new mm with
		 * an empty/default LDT was created in new_mm().
		 */
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
					      { .copy_segments =
							from_mm->id.u.mm_fd } } );
		i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
		if (i != sizeof(copy))
			printk(KERN_ERR "new_mm : /proc/mm copy_segments "
			       "failed, err = %d\n", -i);
	}

	if (!ptrace_ldt) {
		/*
		 * Our local LDT is used to supply the data for
		 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
		 * i.e., we have to use the stub for modify_ldt, which
		 * can't handle the big read buffer of up to 64kB.
		 */
		mutex_lock(&from_mm->arch.ldt.lock);
		if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
			memcpy(new_mm->arch.ldt.u.entries,
			       from_mm->arch.ldt.u.entries,
			       sizeof(new_mm->arch.ldt.u.entries));
		else {
			i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
			while (i-- > 0) {
				page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
				if (!page) {
					err = -ENOMEM;
					break;
				}
				new_mm->arch.ldt.u.pages[i] =
					(struct ldt_entry *) page;
				memcpy(new_mm->arch.ldt.u.pages[i],
				       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
			}
		}
		new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
		mutex_unlock(&from_mm->arch.ldt.lock);
	}

out:
	return err;
}
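/*
 * free_ldt() only releases the page-based storage; direct entries are
 * embedded in the mm_context itself and need no freeing.
 */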
void free_ldt(struct mm_context *mm)
{
	int i;

	if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long) mm->arch.ldt.u.pages[i]);
	}
	mm->arch.ldt.entry_count = 0;
}
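/*
 * Syscall entry point. A guest process reaches this through
 * modify_ldt(2); for illustration only (a userspace sketch, not part
 * of this file), a typical caller installing a 32-bit data segment
 * looks like:
 *
 *	struct user_desc d = {
 *		.entry_number   = 0,
 *		.base_addr      = (unsigned long) base,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *	};
 *	syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 */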
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}