/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
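/*
 * The LDT (Local Descriptor Table) is a per-mm table of segment
 * descriptors that userspace manages through the modify_ldt(2) system
 * call; it is mainly used by programs that need private segments, such
 * as DOS emulators and Wine.
 */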
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#ifdef CONFIG_SMP
/* IPI callback: reload the LDT on a CPU that may be running this mm. */
static void flush_ldt(void *current_mm)
{
	if (current->active_mm == current_mm)
		load_LDT(&current->active_mm->context);
}
#endif
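/*
 * Grow (or initially allocate) the LDT of an mm to at least @mincount
 * entries, rounded up to a whole number of pages.  When @reload is set,
 * the new table is loaded on the current CPU and the other CPUs running
 * this mm are asked to reload it as well.
 */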
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;

	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	/* Round up to a whole number of pages worth of entries. */
	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
	else
		newldt = (void *)__get_free_page(GFP_KERNEL);
	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
	       (mincount - oldsize) * LDT_ENTRY_SIZE);

	paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
	/* CHECKME: Do we really need this ? */
	wmb();
#endif
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		preempt_disable();
		load_LDT(pc);
		if (!cpumask_equal(mm_cpumask(current->mm),
				   cpumask_of(smp_processor_id())))
			smp_call_function(flush_ldt, current->mm, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	if (oldsize) {
		paravirt_free_ldt(oldldt, oldsize);
		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			put_page(virt_to_page(oldldt));
	}
	return 0;
}
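/*
 * Duplicate the parent's LDT into a freshly created mm (fork path);
 * no reload is requested since the child is not running yet.
 */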
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);
	int i;

	if (err < 0)
		return err;

	for (i = 0; i < old->size; i++)
		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
	return 0;
}
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct *old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	mm->context.size = 0;
	old_mm = current->mm;
	if (old_mm && old_mm->context.size > 0) {
		mutex_lock(&old_mm->context.lock);
		retval = copy_ldt(&mm->context, &old_mm->context);
		mutex_unlock(&old_mm->context.lock);
	}
	return retval;
}
/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	if (mm->context.size) {
#ifdef CONFIG_X86_32
		/* CHECKME: Can this ever happen ? */
		if (mm == current->active_mm)
			clear_LDT();
#endif
		paravirt_free_ldt(mm->context.ldt, mm->context.size);
		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(mm->context.ldt);
		else
			put_page(virt_to_page(mm->context.ldt));
		mm->context.size = 0;
	}
}
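/*
 * modify_ldt() func 0: copy the current LDT to userspace, zero-filling
 * any requested bytes beyond the table's current size.
 */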
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;
	struct mm_struct *mm = current->mm;

	if (!mm->context.size)
		return 0;
	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	mutex_lock(&mm->context.lock);
	size = mm->context.size * LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = 0;
	if (copy_to_user(ptr, mm->context.ldt, size))
		err = -EFAULT;
	mutex_unlock(&mm->context.lock);
	if (err < 0)
		goto error_return;
	if (size != bytecount) {
		/* zero-fill the rest */
		if (clear_user(ptr + size, bytecount - size) != 0) {
			err = -EFAULT;
			goto error_return;
		}
	}
	return bytecount;
error_return:
	return err;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}
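/*
 * modify_ldt() funcs 1 and 0x11: validate a struct user_desc from
 * userspace, grow the LDT if the requested slot does not exist yet,
 * and install the descriptor.  @oldmode selects the legacy (func 1)
 * semantics.
 */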
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct desc_struct ldt;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&mm->context.lock);
	if (ldt_info.entry_number >= mm->context.size) {
		error = alloc_ldt(&current->mm->context,
				  ldt_info.entry_number + 1, 1);
		if (error < 0)
			goto out_unlock;
	}

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			memset(&ldt, 0, sizeof(ldt));
			goto install;
		}
	}

	fill_ldt(&ldt, &ldt_info);
	if (oldmode)
		ldt.avl = 0;

	/* Install the new entry ... */
install:
	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}
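/*
 * modify_ldt(2) entry point.  func selects the operation:
 * 0 = read the LDT, 1 = write an entry (legacy semantics),
 * 2 = read the default LDT, 0x11 = write an entry (current semantics).
 */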
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}
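/*
 * Minimal userspace sketch (illustrative only, not part of this file)
 * of how the func 0x11 path above is exercised from a 32-bit process.
 * Field names come from struct user_desc in <asm/ldt.h>; "buf" is a
 * hypothetical buffer the new data segment should cover.
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static char buf[4096];
 *
 *	struct user_desc desc = {
 *		.entry_number    = 0,
 *		.base_addr       = (unsigned long)buf,
 *		.limit           = sizeof(buf) - 1,	// byte-granular limit
 *		.seg_32bit       = 1,
 *		.contents        = MODIFY_LDT_CONTENTS_DATA,
 *		.read_exec_only  = 0,
 *		.limit_in_pages  = 0,
 *		.seg_not_present = 0,
 *		.useable         = 1,
 *	};
 *
 *	// func 0x11 -> write_ldt(ptr, bytecount, 0) above
 *	syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 */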