/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

#ifdef CONFIG_SMP
static void flush_ldt(void *current_mm)
{
        if (current->active_mm == current_mm)
                load_LDT(&current->active_mm->context);
}
#endif
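
/*
 * Grow (or initially allocate) the LDT of @pc so that it can hold at
 * least @mincount entries.  The size is rounded up to a whole page; a
 * single page comes from the page allocator, anything larger from
 * vmalloc().  If @reload is set, the new LDT is loaded on the current
 * CPU and, on SMP, propagated to every other CPU running this mm.
 */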
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
        void *oldldt, *newldt;
        int oldsize;

        if (mincount <= pc->size)
                return 0;
        oldsize = pc->size;
        mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
                        (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
        if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
                newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
        else
                newldt = (void *)__get_free_page(GFP_KERNEL);

        if (!newldt)
                return -ENOMEM;

        if (oldsize)
                memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
        oldldt = pc->ldt;
        memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
               (mincount - oldsize) * LDT_ENTRY_SIZE);

        paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
        /* CHECKME: Do we really need this ? */
        wmb();
#endif
        pc->ldt = newldt;
        wmb();
        pc->size = mincount;
        wmb();

        if (reload) {
#ifdef CONFIG_SMP
                preempt_disable();
                load_LDT(pc);
                if (!cpumask_equal(mm_cpumask(current->mm),
                                   cpumask_of(smp_processor_id())))
                        smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
#else
                load_LDT(pc);
#endif
        }
        if (oldsize) {
                paravirt_free_ldt(oldldt, oldsize);
                if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(oldldt);
                else
                        put_page(virt_to_page(oldldt));
        }
        return 0;
}
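
/*
 * Duplicate the parent's LDT into a freshly created mm: size the new
 * table to match the old one, then copy every descriptor entry across.
 */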
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
        int err = alloc_ldt(new, old->size, 0);
        int i;

        if (err < 0)
                return err;

        for (i = 0; i < old->size; i++)
                write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
        return 0;
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        struct mm_struct *old_mm;
        int retval = 0;

        mutex_init(&mm->context.lock);
        mm->context.size = 0;
        old_mm = current->mm;
        if (old_mm && old_mm->context.size > 0) {
                mutex_lock(&old_mm->context.lock);
                retval = copy_ldt(&mm->context, &old_mm->context);
                mutex_unlock(&old_mm->context.lock);
        }
        return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
        if (mm->context.size) {
#ifdef CONFIG_X86_32
                /* CHECKME: Can this ever happen ? */
                if (mm == current->active_mm)
                        clear_LDT();
#endif
                paravirt_free_ldt(mm->context.ldt, mm->context.size);
                if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(mm->context.ldt);
                else
                        put_page(virt_to_page(mm->context.ldt));
                mm->context.size = 0;
        }
}
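
/*
 * Copy the current LDT out to user space, truncating to the caller's
 * buffer size and zero-filling any space beyond the last allocated
 * entry.  Returns the number of bytes handed back, or a negative errno.
 */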
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
        int err;
        unsigned long size;
        struct mm_struct *mm = current->mm;

        if (!mm->context.size)
                return 0;
        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

        mutex_lock(&mm->context.lock);
        size = mm->context.size * LDT_ENTRY_SIZE;
        if (size > bytecount)
                size = bytecount;

        err = 0;
        if (copy_to_user(ptr, mm->context.ldt, size))
                err = -EFAULT;
        mutex_unlock(&mm->context.lock);
        if (err < 0)
                goto error_return;
        if (size != bytecount) {
                /* zero-fill the rest */
                if (clear_user(ptr + size, bytecount - size) != 0) {
                        err = -EFAULT;
                        goto error_return;
                }
        }
        return bytecount;
error_return:
        return err;
}
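
/*
 * The default LDT contains no usable descriptors, so reading it simply
 * zeroes the user buffer (capped at the per-architecture default size)
 * and reports how many bytes were cleared.
 */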
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
        /* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
        unsigned long size = 5 * sizeof(struct desc_struct);
#else
        unsigned long size = 128;
#endif

        if (bytecount > size)
                bytecount = size;
        if (clear_user(ptr, bytecount))
                return -EFAULT;
        return bytecount;
}
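
/*
 * Validate a user_desc from user space and install it into this mm's
 * LDT, growing the table first if the requested entry lies beyond the
 * current size.  @oldmode selects the legacy modify_ldt semantics:
 * entries with contents == 3 are rejected outright in oldmode (and must
 * be marked not-present otherwise), and oldmode clears the AVL bit.
 */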
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
        struct mm_struct *mm = current->mm;
        struct desc_struct ldt;
        int error;
        struct user_desc ldt_info;

        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        error = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        error = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (oldmode)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        mutex_lock(&mm->context.lock);
        if (ldt_info.entry_number >= mm->context.size) {
                error = alloc_ldt(&current->mm->context,
                                  ldt_info.entry_number + 1, 1);
                if (error < 0)
                        goto out_unlock;
        }

        /* Allow LDTs to be cleared by the user. */
        if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
                if (oldmode || LDT_empty(&ldt_info)) {
                        memset(&ldt, 0, sizeof(ldt));
                        goto install;
                }
        }

        fill_ldt(&ldt, &ldt_info);
        if (oldmode)
                ldt.avl = 0;

        /* Install the new entry ... */
install:
        write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);

        error = 0;

out_unlock:
        mutex_unlock(&mm->context.lock);
out:
        return error;
}
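
/*
 * Entry point for the modify_ldt(2) system call.  @func selects the
 * operation: 0 reads the LDT, 1 writes an entry with the legacy
 * semantics, 2 reads the default LDT, and 0x11 writes an entry with
 * the current semantics.
 *
 * Illustrative user-space invocation (a sketch only; the descriptor
 * contents and the wrapper used depend on the libc in question):
 *
 *	struct user_desc desc = { .entry_number = 0, ... };
 *	syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 */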
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
                              unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
                ret = write_ldt(ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
                ret = write_ldt(ptr, bytecount, 0);
                break;
        }
        return ret;
}