/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unistd.h>
#include <sysdep/tls.h>

extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
static long write_ldt_entry(struct mm_id *mm_idp, int func,
			    struct user_desc *desc, void **addr, int done)

	res = syscall_stub_data(mm_idp, (unsigned long *) desc,
				(sizeof(*desc) + sizeof(long) - 1) &

		unsigned long args[] = { func,
					 (unsigned long) stub_addr,

		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - sys_modify_ldt_skas
 */
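/*
 * For orientation (a rough sketch, not the authoritative definition; see
 * the UML arch headers for the real one): uml_ldt_t keeps a few entries
 * inline and switches to an array of whole pages for larger tables,
 * roughly
 *
 *	typedef struct uml_ldt {
 *		int entry_count;
 *		struct mutex lock;
 *		union {
 *			struct ldt_entry *pages[LDT_PAGES_MAX];
 *			struct ldt_entry entries[LDT_DIRECT_ENTRIES];
 *		} u;
 *	} uml_ldt_t;
 *
 * which is why read_ldt() and write_ldt() below treat the
 * LDT_DIRECT_ENTRIES case and the paged case separately.
 */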
static int read_ldt(void __user *ptr, unsigned long bytecount)

	uml_ldt_t *ldt = &current->mm->context.arch.ldt;

	if (!ldt->entry_count)

	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;

		if (copy_to_user(ptr, ldt->u.entries, size))

		for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;

			if (copy_to_user(ptr, ldt->u.pages[i], size)) {

	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)

	if (clear_user(ptr, bytecount))
static int read_default_ldt(void __user *ptr, unsigned long bytecount)

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;
	/*
	 * UML doesn't support lcall7 and lcall27, so we don't really have
	 * a default ldt. Instead, we emulate an empty ldt of the common
	 * host default ldt size (illustrated just below this function).
	 */
	if (clear_user(ptr, bytecount))
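/*
 * Illustrative only (userspace, not part of this file): a guest process
 * asking for the default ldt, e.g.
 *
 *	char buf[5*LDT_ENTRY_SIZE];
 *	syscall(SYS_modify_ldt, 2, buf, sizeof(buf));
 *
 * simply gets the buffer zeroed, since the default ldt is emulated as
 * empty here.
 */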
static int write_ldt(void __user *ptr, unsigned long bytecount, int func)

	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id *mm_idp = &current->mm->context.id;

	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;

	if (bytecount != sizeof(ldt_info))

	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))

	if (ldt_info.entry_number >= LDT_ENTRIES)

	if (ldt_info.contents == 3) {

		if (ldt_info.seg_not_present == 0)

	mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;

				memcpy(&entry0, ldt->u.entries,

			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {

				/* Undo the change in host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);

				memcpy(ldt->u.pages[0], &entry0,

				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));

			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;

	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;

		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	    (func == 1 || LDT_empty(&ldt_info))) {

			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);

	mutex_unlock(&ldt->lock);
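/*
 * Entry point for the guest's modify_ldt() calls. As with the host's
 * modify_ldt interface, func selects the operation: 0 reads the current
 * ldt, 1 and 0x11 write an entry, and 2 reads the (emulated) default ldt.
 */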
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)

		ret = read_ldt(ptr, bytecount);

		ret = write_ldt(ptr, bytecount, func);

		ret = read_default_ldt(ptr, bytecount);
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short *host_ldt_entries = NULL;
static void ldt_get_host_info(void)

	struct ldt_entry *ldt;

	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);

	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	for (i = LDT_PAGES_MAX-1, order = 0; i; i >>= 1, order++)

	ldt = (struct ldt_entry *)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);

		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "

	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);

		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");

		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;

	for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)

	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;

		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);

			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "

		host_ldt_entries = tmp;

	for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;

	host_ldt_entries[k] = -1;

	free_pages((unsigned long) ldt, order);
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)

	struct user_desc desc;

	mutex_init(&new_mm->arch.ldt.lock);

		memset(&desc, 0, sizeof(desc));
		/*
		 * Now we try to retrieve info about the ldt we inherited
		 * from the host. All ldt entries found will be reset in
		 * the following loop.
		 */
		for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);

		new_mm->arch.ldt.entry_count = 0;
	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT) if PTRACE_LDT isn't available,
	 * i.e. when we have to use the stub for modify_ldt, which
	 * can't handle the big read buffer of up to 64kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));

		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;

			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);

			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);

	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);
void free_ldt(struct mm_context *mm)

	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;

			free_page((long) mm->arch.ldt.u.pages[i]);

	mm->arch.ldt.entry_count = 0;
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}
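/*
 * Illustrative only (userspace, not part of this file): the path above is
 * what a guest process goes through when it installs an ldt entry, e.g.
 * roughly (field values below are placeholders):
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc d = {
 *		.entry_number   = 0,
 *		.base_addr      = base,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *	};
 *	syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 *
 * which lands in write_ldt() via sys_modify_ldt() and do_modify_ldt_skas().
 */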