2 * Copyright (c) 1990 The Regents of the University of California.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_capsicum.h"
36 #include "opt_kstack_pages.h"
38 #include <sys/param.h>
39 #include <sys/capsicum.h>
40 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
47 #include <sys/sysproto.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_extern.h>
54 #include <machine/cpu.h>
55 #include <machine/pcb.h>
56 #include <machine/pcb_ext.h>
57 #include <machine/proc.h>
58 #include <machine/sysarch.h>
60 #include <security/audit/audit.h>
62 #include <vm/vm_kern.h> /* for kernel_map */
/*
 * LDT sizing helpers.
 *
 * LD_PER_PAGE: number of 8-byte LDT descriptors that fit in one 4K page
 * (PAGE_SIZE / sizeof(union descriptor) == 4096 / 8).
 * NEW_MAX_LD(num): grow `num' to the next LD_PER_PAGE boundary so LDT
 * backing store is always allocated in whole pages.
 * SIZE_FROM_LARGEST_LD(num): byte size of an LDT that holds
 * NEW_MAX_LD(num) descriptors; each descriptor is 8 (== 1 << 3) bytes.
 * NULL_LDT_BASE: sentinel meaning "no LDT backing store".
 *
 * Fix: parenthesize the macro argument in NEW_MAX_LD.  Without the
 * parentheses an argument such as `a ? b : c' would expand to
 * `a ? b : c + LD_PER_PAGE', binding the `+' to the wrong operand.
 */
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num) rounddown2((num) + LD_PER_PAGE, LD_PER_PAGE)
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define NULL_LDT_BASE ((caddr_t)NULL)
/* Forward declarations for the static LDT helpers defined later in this file. */
71 static void set_user_ldt_rv(struct vmspace
*vmsp
);
73 static int i386_set_ldt_data(struct thread
*, int start
, int num
,
74 union descriptor
*descs
);
75 static int i386_ldt_grow(struct thread
*td
, int len
);
/*
 * Fill in a segment descriptor for a based user data segment: the given
 * linear base address, a limit that wraps the full 4GB address space
 * (lolimit 0xffff / hilimit 0xf), read/write-accessed type, and user
 * (ring-3) privilege.  Used below for the %fs/%gs base operations.
 * NOTE(review): the trailing field assignments and closing brace of this
 * function are not visible in this extraction.
 */
78 fill_based_sd(struct segment_descriptor
*sdp
, uint32_t base
)
81 sdp
->sd_lobase
= base
& 0xffffff;
82 sdp
->sd_hibase
= (base
>> 24) & 0xff;
83 sdp
->sd_lolimit
= 0xffff; /* 4GB limit, wraps around */
84 sdp
->sd_hilimit
= 0xf;
85 sdp
->sd_type
= SDT_MEMRWA
;
86 sdp
->sd_dpl
= SEL_UPL
;
/*
 * sysarch(2): machine-dependent operations — LDT get/set, i/o permission
 * bitmap get/set, %fs/%gs segment base get/set, vm86 operations, and
 * extended FPU state retrieval.  Arguments are copied in from uap->parms
 * according to the requested op, dispatched, and results copied back out.
 * NOTE(review): this extraction is missing many interior lines (the
 * switch headers, break/return statements, local `error'/`base'/`kargs'
 * declarations, and closing braces); the comments below annotate only the
 * visible fragments.
 */
93 #ifndef _SYS_SYSPROTO_H_
103 register struct sysarch_args
*uap
;
106 union descriptor
*lp
;
108 struct i386_ldt_args largs
;
109 struct i386_ioperm_args iargs
;
110 struct i386_get_xfpustate xfpu
;
113 struct segment_descriptor sd
, *sdp
;
115 AUDIT_ARG_CMD(uap
->op
);
/* Capsicum: only the listed ops are permitted inside capability mode. */
117 #ifdef CAPABILITY_MODE
119 * When adding new operations, add a new case statement here to
120 * explicitly indicate whether or not the operation is safe to
121 * perform in capability mode.
123 if (IN_CAPABILITY_MODE(td
)) {
127 case I386_GET_IOPERM
:
128 case I386_GET_FSBASE
:
129 case I386_SET_FSBASE
:
130 case I386_GET_GSBASE
:
131 case I386_SET_GSBASE
:
132 case I386_GET_XFPUSTATE
:
135 case I386_SET_IOPERM
:
138 if (KTRPOINT(td
, KTR_CAPFAIL
))
139 ktrcapfail(CAPFAIL_SYSCALL
, NULL
, NULL
);
/* Copy in the op-specific argument structure from userland. */
147 case I386_GET_IOPERM
:
148 case I386_SET_IOPERM
:
149 if ((error
= copyin(uap
->parms
, &kargs
.iargs
,
150 sizeof(struct i386_ioperm_args
))) != 0)
155 if ((error
= copyin(uap
->parms
, &kargs
.largs
,
156 sizeof(struct i386_ldt_args
))) != 0)
/* Reject descriptor counts outside (0, MAX_LD]. */
158 if (kargs
.largs
.num
> MAX_LD
|| kargs
.largs
.num
<= 0)
161 case I386_GET_XFPUSTATE
:
162 if ((error
= copyin(uap
->parms
, &kargs
.xfpu
,
163 sizeof(struct i386_get_xfpustate
))) != 0)
/* I386_GET_LDT: fetch descriptors for the caller. */
172 error
= i386_get_ldt(td
, &kargs
.largs
);
/*
 * I386_SET_LDT: when descs is non-NULL, copy the user's descriptor
 * array into a temporary kernel buffer before installing it; a NULL
 * descs requests the delete/zero path inside i386_set_ldt().
 */
175 if (kargs
.largs
.descs
!= NULL
) {
176 lp
= (union descriptor
*)malloc(
177 kargs
.largs
.num
* sizeof(union descriptor
),
179 error
= copyin(kargs
.largs
.descs
, lp
,
180 kargs
.largs
.num
* sizeof(union descriptor
));
182 error
= i386_set_ldt(td
, &kargs
.largs
, lp
);
185 error
= i386_set_ldt(td
, &kargs
.largs
, NULL
);
/* I386_GET_IOPERM: query the bitmap, copy the result structure out. */
188 case I386_GET_IOPERM
:
189 error
= i386_get_ioperm(td
, &kargs
.iargs
);
191 error
= copyout(&kargs
.iargs
, uap
->parms
,
192 sizeof(struct i386_ioperm_args
));
194 case I386_SET_IOPERM
:
195 error
= i386_set_ioperm(td
, &kargs
.iargs
);
/* vm86 sub-operations are handled entirely by vm86_sysarch(). */
198 error
= vm86_sysarch(td
, uap
->parms
);
/* I386_GET_FSBASE: reassemble the linear base from the pcb descriptor. */
200 case I386_GET_FSBASE
:
201 sdp
= &td
->td_pcb
->pcb_fsd
;
202 base
= sdp
->sd_hibase
<< 24 | sdp
->sd_lobase
;
203 error
= copyout(&base
, uap
->parms
, sizeof(base
));
205 case I386_SET_FSBASE
:
206 error
= copyin(uap
->parms
, &base
, sizeof(base
));
209 * Construct a descriptor and store it in the pcb for
210 * the next context switch. Also store it in the gdt
211 * so that the load of tf_fs into %fs will activate it
212 * at return to userland.
214 fill_based_sd(&sd
, base
);
216 td
->td_pcb
->pcb_fsd
= sd
;
217 PCPU_GET(fsgs_gdt
)[0] = sd
;
219 td
->td_frame
->tf_fs
= GSEL(GUFS_SEL
, SEL_UPL
);
/* I386_GET_GSBASE: same reconstruction, from the %gs descriptor. */
222 case I386_GET_GSBASE
:
223 sdp
= &td
->td_pcb
->pcb_gsd
;
224 base
= sdp
->sd_hibase
<< 24 | sdp
->sd_lobase
;
225 error
= copyout(&base
, uap
->parms
, sizeof(base
));
227 case I386_SET_GSBASE
:
228 error
= copyin(uap
->parms
, &base
, sizeof(base
));
231 * Construct a descriptor and store it in the pcb for
232 * the next context switch. Also store it in the gdt
233 * because we have to do a load_gs() right now.
235 fill_based_sd(&sd
, base
);
237 td
->td_pcb
->pcb_gsd
= sd
;
238 PCPU_GET(fsgs_gdt
)[1] = sd
;
240 load_gs(GSEL(GUGS_SEL
, SEL_UPL
));
/*
 * I386_GET_XFPUSTATE: bound the requested length against the extended
 * state area (which follows the legacy savefpu area), then copy out.
 */
243 case I386_GET_XFPUSTATE
:
244 if (kargs
.xfpu
.len
> cpu_max_ext_state_size
-
245 sizeof(union savefpu
))
248 error
= copyout((char *)(get_pcb_user_save_td(td
) + 1),
249 kargs
.xfpu
.addr
, kargs
.xfpu
.len
);
/*
 * Allocate and initialize a pcb extension (struct pcb_ext) holding a
 * private TSS and i/o permission bitmap for this thread, then switch the
 * CPU onto the new TSS (GDT slot update + ltr()).  Asserts the target is
 * curthread and that no extension exists yet.
 * NOTE(review): some interior lines (locals, allocation-failure handling,
 * the loop body storing ~0, return) are missing from this extraction.
 */
259 i386_extend_pcb(struct thread
*td
)
/* Template descriptor for the private TSS; base is patched in below. */
264 struct soft_segment_descriptor ssd
= {
265 0, /* segment base address (overwritten) */
266 ctob(IOPAGES
+ 1) - 1, /* length */
267 SDT_SYS386TSS
, /* segment type */
268 0, /* priority level */
269 1, /* descriptor present */
271 0, /* default 32 size */
/* One extra page beyond the IOPAGES bitmap pages for the pcb_ext header. */
275 ext
= (struct pcb_ext
*)kmem_malloc(kernel_arena
, ctob(IOPAGES
+1),
277 /* -16 is so we can convert a trapframe into vm86trapframe inplace */
278 ext
->ext_tss
.tss_esp0
= (vm_offset_t
)td
->td_pcb
- 16;
279 ext
->ext_tss
.tss_ss0
= GSEL(GDATA_SEL
, SEL_KPL
);
281 * The last byte of the i/o map must be followed by an 0xff byte.
282 * We arbitrarily allocate 16 bytes here, to keep the starting
283 * address on a doubleword boundary.
285 offset
= PAGE_SIZE
- 16;
/* tss_ioopt's high 16 bits hold the bitmap offset from the TSS base. */
286 ext
->ext_tss
.tss_ioopt
=
287 (offset
- ((unsigned)&ext
->ext_tss
- (unsigned)ext
)) << 16;
288 ext
->ext_iomap
= (caddr_t
)ext
+ offset
;
/* vm86 interrupt redirection bitmap sits just below the i/o bitmap. */
289 ext
->ext_vm86
.vm86_intmap
= (caddr_t
)ext
+ offset
- 32;
291 addr
= (u_long
*)ext
->ext_vm86
.vm86_intmap
;
292 for (i
= 0; i
< (ctob(IOPAGES
) + 32 + 16) / sizeof(u_long
); i
++)
/* Point the TSS descriptor at our embedded TSS and shrink its limit. */
295 ssd
.ssd_base
= (unsigned)&ext
->ext_tss
;
296 ssd
.ssd_limit
-= ((unsigned)&ext
->ext_tss
- (unsigned)ext
);
297 ssdtosd(&ssd
, &ext
->ext_tssd
);
299 KASSERT(td
== curthread
, ("giving TSS to !curthread"));
300 KASSERT(td
->td_pcb
->pcb_ext
== 0, ("already have a TSS!"));
302 /* Switch to the new TSS. */
304 td
->td_pcb
->pcb_ext
= ext
;
305 PCPU_SET(private_tss
, 1);
306 *PCPU_GET(tss_gdt
) = ext
->ext_tssd
;
307 ltr(GSEL(GPROC0_SEL
, SEL_KPL
));
/*
 * Set or clear bits in the thread's i/o permission bitmap over the port
 * range [uap->start, uap->start + uap->length).  Requires PRIV_IO and
 * securelevel <= 0; lazily creates the private TSS via i386_extend_pcb()
 * on first use.  The range check rejects unsigned wraparound
 * (start > start + length) and anything past the bitmap's last port.
 * NOTE(review): the `if (uap->enable)' selector between the two bit
 * operations and the return statements are missing from this extraction.
 */
314 i386_set_ioperm(td
, uap
)
316 struct i386_ioperm_args
*uap
;
322 if ((error
= priv_check(td
, PRIV_IO
)) != 0)
324 if ((error
= securelevel_gt(td
->td_ucred
, 0)) != 0)
328 * While this is restricted to root, we should probably figure out
329 * whether any other driver is using this i/o address, as so not to
330 * cause confusion. This probably requires a global 'usage registry'.
333 if (td
->td_pcb
->pcb_ext
== 0)
334 if ((error
= i386_extend_pcb(td
)) != 0)
336 iomap
= (char *)td
->td_pcb
->pcb_ext
->ext_iomap
;
338 if (uap
->start
> uap
->start
+ uap
->length
||
339 uap
->start
+ uap
->length
> IOPAGES
* PAGE_SIZE
* NBBY
)
/* Clearing a bitmap bit grants access; setting it denies (TSS semantics). */
342 for (i
= uap
->start
; i
< uap
->start
+ uap
->length
; i
++) {
344 iomap
[i
>> 3] &= ~(1 << (i
& 7));
346 iomap
[i
>> 3] |= (1 << (i
& 7));
/*
 * Report the i/o permission state at port uap->start: uap->enable is the
 * inverse of the bitmap bit (bit clear == enabled), and the scan below
 * measures how long the run of identically-set bits continues, which the
 * (not visible here) code presumably stores into uap->length.
 * NOTE(review): locals, the no-pcb_ext early path's body, and the return
 * are missing from this extraction.
 */
352 i386_get_ioperm(td
, uap
)
354 struct i386_ioperm_args
*uap
;
359 if (uap
->start
>= IOPAGES
* PAGE_SIZE
* NBBY
)
362 if (td
->td_pcb
->pcb_ext
== 0) {
367 iomap
= (char *)td
->td_pcb
->pcb_ext
->ext_iomap
;
370 state
= (iomap
[i
>> 3] >> (i
& 7)) & 1;
371 uap
->enable
= !state
;
/* Count the run of bits equal to `state' starting after uap->start. */
374 for (i
= uap
->start
+ 1; i
< IOPAGES
* PAGE_SIZE
* NBBY
; i
++) {
375 if (state
!= ((iomap
[i
>> 3] >> (i
& 7)) & 1))
385 * Update the GDT entry pointing to the LDT to point to the LDT of the
386 * current process. Manage dt_lock holding/unholding autonomously.
/*
 * Copies the process' LDT descriptor into this CPU's GUSERLDT_SEL GDT
 * slot (per-CPU slot under SMP, single slot otherwise), activates it
 * with lldt(), and records the selector in PCPU currentldt.  Takes
 * dt_lock if not already held; the unlock at the end is presumably
 * conditional on that (the conditional's lines are not visible here).
 */
389 set_user_ldt(struct mdproc
*mdp
)
391 struct proc_ldt
*pldt
;
395 if (!mtx_owned(&dt_lock
)) {
396 mtx_lock_spin(&dt_lock
);
402 gdt
[PCPU_GET(cpuid
) * NGDT
+ GUSERLDT_SEL
].sd
= pldt
->ldt_sd
;
404 gdt
[GUSERLDT_SEL
].sd
= pldt
->ldt_sd
;
406 lldt(GSEL(GUSERLDT_SEL
, SEL_KPL
));
407 PCPU_SET(currentldt
, GSEL(GUSERLDT_SEL
, SEL_KPL
));
409 mtx_unlock_spin(&dt_lock
);
/*
 * smp_rendezvous() callback: reload the user LDT on this CPU, but only
 * when the thread currently running here belongs to the target vmspace
 * (otherwise the LDT reload would be for the wrong process).
 */
414 set_user_ldt_rv(struct vmspace
*vmsp
)
419 if (vmsp
!= td
->td_proc
->p_vmspace
)
422 set_user_ldt(&td
->td_proc
->p_md
);
427 * dt_lock must be held. Returns with dt_lock held.
/*
 * Allocate a new proc_ldt big enough for at least `len' descriptors
 * (rounded up by NEW_MAX_LD), build its LDT segment descriptor via the
 * GUSERLDT_SEL gdt_segs template, and seed its contents from the
 * process' existing LDT — or from the default `ldt' table when the
 * process has none.  dt_lock is dropped around the blocking M_WAITOK
 * allocations and re-taken before touching gdt_segs.
 * NOTE(review): the len clamp inside the copy path and the return are
 * missing from this extraction.
 */
430 user_ldt_alloc(struct mdproc
*mdp
, int len
)
432 struct proc_ldt
*pldt
, *new_ldt
;
434 mtx_assert(&dt_lock
, MA_OWNED
);
435 mtx_unlock_spin(&dt_lock
);
436 new_ldt
= malloc(sizeof(struct proc_ldt
),
437 M_SUBPROC
, M_WAITOK
);
439 new_ldt
->ldt_len
= len
= NEW_MAX_LD(len
);
440 new_ldt
->ldt_base
= (caddr_t
)kmem_malloc(kernel_arena
,
441 len
* sizeof(union descriptor
), M_WAITOK
);
442 new_ldt
->ldt_refcnt
= 1;
443 new_ldt
->ldt_active
= 0;
445 mtx_lock_spin(&dt_lock
);
446 gdt_segs
[GUSERLDT_SEL
].ssd_base
= (unsigned)new_ldt
->ldt_base
;
447 gdt_segs
[GUSERLDT_SEL
].ssd_limit
= len
* sizeof(union descriptor
) - 1;
448 ssdtosd(&gdt_segs
[GUSERLDT_SEL
], &new_ldt
->ldt_sd
);
/* Copy the old LDT contents (or the default table) into the new store. */
450 if ((pldt
= mdp
->md_ldt
) != NULL
) {
451 if (len
> pldt
->ldt_len
)
453 bcopy(pldt
->ldt_base
, new_ldt
->ldt_base
,
454 len
* sizeof(union descriptor
));
456 bcopy(ldt
, new_ldt
->ldt_base
, sizeof(ldt
));
462 * Must be called with dt_lock held. Returns with dt_lock unheld.
/*
 * Detach and release the process' LDT.  If the process has none, just
 * drop the lock and return.  When the caller is curthread, the default
 * LDT is restored as the active one (PCPU currentldt).  The final
 * user_ldt_deref() drops the reference and releases dt_lock.
 */
465 user_ldt_free(struct thread
*td
)
467 struct mdproc
*mdp
= &td
->td_proc
->p_md
;
468 struct proc_ldt
*pldt
;
470 mtx_assert(&dt_lock
, MA_OWNED
);
471 if ((pldt
= mdp
->md_ldt
) == NULL
) {
472 mtx_unlock_spin(&dt_lock
);
476 if (td
== curthread
) {
478 PCPU_SET(currentldt
, _default_ldt
);
482 user_ldt_deref(pldt
);
/*
 * Drop one reference on a proc_ldt.  On the last reference the LDT
 * backing store and the proc_ldt itself are freed (after dropping
 * dt_lock, since kmem_free/free may block).  dt_lock is released on
 * every path; caller must hold it on entry.
 */
486 user_ldt_deref(struct proc_ldt
*pldt
)
489 mtx_assert(&dt_lock
, MA_OWNED
);
490 if (--pldt
->ldt_refcnt
== 0) {
491 mtx_unlock_spin(&dt_lock
);
492 kmem_free(kernel_arena
, (vm_offset_t
)pldt
->ldt_base
,
493 pldt
->ldt_len
* sizeof(union descriptor
));
494 free(pldt
, M_SUBPROC
);
496 mtx_unlock_spin(&dt_lock
);
500 * Note for the authors of compat layers (linux, etc): copyout() in
501 * the function below is not a problem since it presents data in
502 * arch-specific format (i.e. i386-specific in this case), not in
503 * the OS-specific one.
/*
 * Copy up to uap->num LDT descriptors, starting at slot uap->start, out
 * to uap->descs.  Reads either the process' private LDT (under dt_lock)
 * or the default `ldt' table when the process has none.  The range check
 * rejects start/num combinations past the table end; the count actually
 * copied is returned in td_retval[0].
 * NOTE(review): the debug-printf guard, locals, and return are missing
 * from this extraction; also note `lp' into the private LDT is computed
 * before dt_lock is dropped but used after — as in the upstream code.
 */
506 i386_get_ldt(td
, uap
)
508 struct i386_ldt_args
*uap
;
511 struct proc_ldt
*pldt
;
513 union descriptor
*lp
;
516 printf("i386_get_ldt: start=%d num=%d descs=%p\n",
517 uap
->start
, uap
->num
, (void *)uap
->descs
);
520 mtx_lock_spin(&dt_lock
);
521 if ((pldt
= td
->td_proc
->p_md
.md_ldt
) != NULL
) {
522 nldt
= pldt
->ldt_len
;
523 lp
= &((union descriptor
*)(pldt
->ldt_base
))[uap
->start
];
524 mtx_unlock_spin(&dt_lock
);
525 num
= min(uap
->num
, nldt
);
/* No private LDT: serve reads from the default descriptor table. */
527 mtx_unlock_spin(&dt_lock
);
528 nldt
= sizeof(ldt
)/sizeof(ldt
[0]);
529 num
= min(uap
->num
, nldt
);
530 lp
= &ldt
[uap
->start
];
533 if ((uap
->start
> (unsigned int)nldt
) ||
534 ((unsigned int)num
> (unsigned int)nldt
) ||
535 ((unsigned int)(uap
->start
+ num
) > (unsigned int)nldt
))
538 error
= copyout(lp
, uap
->descs
, num
* sizeof(union descriptor
));
540 td
->td_retval
[0] = num
;
/*
 * Install user-supplied LDT descriptors.  Three flavors:
 *  - start == 0 && num == 0 (descs NULL): free/zero the user range of
 *    the LDT without userland needing to know NLDT;
 *  - start == LDT_AUTO_ALLOC && num == 1: find (or grow to) a free slot
 *    and install one descriptor there, returning the slot;
 *  - otherwise: validate the [start, start+num) range and install.
 * Every supplied descriptor is vetted so a user process cannot create
 * system-segment/gate types or a present descriptor with DPL != ring 3.
 * The chosen start slot is returned in td_retval[0].
 * NOTE(review): switch/return/break lines, some locals, and several
 * closing braces are missing from this extraction.
 */
546 i386_set_ldt(td
, uap
, descs
)
548 struct i386_ldt_args
*uap
;
549 union descriptor
*descs
;
553 struct mdproc
*mdp
= &td
->td_proc
->p_md
;
554 struct proc_ldt
*pldt
;
555 union descriptor
*dp
;
558 printf("i386_set_ldt: start=%d num=%d descs=%p\n",
559 uap
->start
, uap
->num
, (void *)uap
->descs
);
563 /* Free descriptors */
564 if (uap
->start
== 0 && uap
->num
== 0) {
566 * Treat this as a special case, so userland needn't
567 * know magic number NLDT.
570 uap
->num
= MAX_LD
- NLDT
;
574 mtx_lock_spin(&dt_lock
);
575 if ((pldt
= mdp
->md_ldt
) == NULL
||
576 uap
->start
>= pldt
->ldt_len
) {
577 mtx_unlock_spin(&dt_lock
);
/* Clamp the zeroed range to the actual LDT length before bzero(). */
580 largest_ld
= uap
->start
+ uap
->num
;
581 if (largest_ld
> pldt
->ldt_len
)
582 largest_ld
= pldt
->ldt_len
;
583 i
= largest_ld
- uap
->start
;
584 bzero(&((union descriptor
*)(pldt
->ldt_base
))[uap
->start
],
585 sizeof(union descriptor
) * i
);
586 mtx_unlock_spin(&dt_lock
);
590 if (!(uap
->start
== LDT_AUTO_ALLOC
&& uap
->num
== 1)) {
591 /* verify range of descriptors to modify */
592 largest_ld
= uap
->start
+ uap
->num
;
593 if (uap
->start
>= MAX_LD
|| largest_ld
> MAX_LD
) {
598 /* Check descriptors for access violations */
599 for (i
= 0; i
< uap
->num
; i
++) {
602 switch (dp
->sd
.sd_type
) {
603 case SDT_SYSNULL
: /* system null */
606 case SDT_SYS286TSS
: /* system 286 TSS available */
607 case SDT_SYSLDT
: /* system local descriptor table */
608 case SDT_SYS286BSY
: /* system 286 TSS busy */
609 case SDT_SYSTASKGT
: /* system task gate */
610 case SDT_SYS286IGT
: /* system 286 interrupt gate */
611 case SDT_SYS286TGT
: /* system 286 trap gate */
612 case SDT_SYSNULL2
: /* undefined by Intel */
613 case SDT_SYS386TSS
: /* system 386 TSS available */
614 case SDT_SYSNULL3
: /* undefined by Intel */
615 case SDT_SYS386BSY
: /* system 386 TSS busy */
616 case SDT_SYSNULL4
: /* undefined by Intel */
617 case SDT_SYS386IGT
: /* system 386 interrupt gate */
618 case SDT_SYS386TGT
: /* system 386 trap gate */
619 case SDT_SYS286CGT
: /* system 286 call gate */
620 case SDT_SYS386CGT
: /* system 386 call gate */
621 /* I can't think of any reason to allow a user proc
622 * to create a segment of these types. They are
628 /* memory segment types */
629 case SDT_MEMEC
: /* memory execute only conforming */
630 case SDT_MEMEAC
: /* memory execute only accessed conforming */
631 case SDT_MEMERC
: /* memory execute read conforming */
632 case SDT_MEMERAC
: /* memory execute read accessed conforming */
633 /* Must be "present" if executable and conforming. */
634 if (dp
->sd
.sd_p
== 0)
637 case SDT_MEMRO
: /* memory read only */
638 case SDT_MEMROA
: /* memory read only accessed */
639 case SDT_MEMRW
: /* memory read write */
640 case SDT_MEMRWA
: /* memory read write accessed */
641 case SDT_MEMROD
: /* memory read only expand dwn limit */
642 case SDT_MEMRODA
: /* memory read only expand dwn lim accessed */
643 case SDT_MEMRWD
: /* memory read write expand dwn limit */
644 case SDT_MEMRWDA
: /* memory read write expand dwn lim acessed */
645 case SDT_MEME
: /* memory execute only */
646 case SDT_MEMEA
: /* memory execute only accessed */
647 case SDT_MEMER
: /* memory execute read */
648 case SDT_MEMERA
: /* memory execute read accessed */
655 /* Only user (ring-3) descriptors may be present. */
656 if ((dp
->sd
.sd_p
!= 0) && (dp
->sd
.sd_dpl
!= SEL_UPL
))
660 if (uap
->start
== LDT_AUTO_ALLOC
&& uap
->num
== 1) {
661 /* Allocate a free slot */
662 mtx_lock_spin(&dt_lock
);
663 if ((pldt
= mdp
->md_ldt
) == NULL
) {
664 if ((error
= i386_ldt_grow(td
, NLDT
+ 1))) {
665 mtx_unlock_spin(&dt_lock
);
672 * start scanning a bit up to leave room for NVidia and
673 * Wine, which still user the "Blat" method of allocation.
/* Linear scan from slot NLDT upward for a null (free) descriptor. */
675 dp
= &((union descriptor
*)(pldt
->ldt_base
))[NLDT
];
676 for (i
= NLDT
; i
< pldt
->ldt_len
; ++i
) {
677 if (dp
->sd
.sd_type
== SDT_SYSNULL
)
/* No free slot found: grow the LDT by one and use the new slot. */
681 if (i
>= pldt
->ldt_len
) {
682 if ((error
= i386_ldt_grow(td
, pldt
->ldt_len
+1))) {
683 mtx_unlock_spin(&dt_lock
);
689 error
= i386_set_ldt_data(td
, i
, 1, descs
);
690 mtx_unlock_spin(&dt_lock
);
/* Explicit-range case: grow to cover the range, then install. */
692 largest_ld
= uap
->start
+ uap
->num
;
693 mtx_lock_spin(&dt_lock
);
694 if (!(error
= i386_ldt_grow(td
, largest_ld
))) {
695 error
= i386_set_ldt_data(td
, uap
->start
, uap
->num
,
698 mtx_unlock_spin(&dt_lock
);
701 td
->td_retval
[0] = uap
->start
;
/*
 * Copy `num' validated descriptors into the process LDT starting at slot
 * `start'.  Caller must hold dt_lock (asserted).
 * NOTE(review): the line naming the copy routine and its first argument
 * (`descs') is missing from this extraction; only the destination and
 * length arguments are visible.
 */
706 i386_set_ldt_data(struct thread
*td
, int start
, int num
,
707 union descriptor
*descs
)
709 struct mdproc
*mdp
= &td
->td_proc
->p_md
;
710 struct proc_ldt
*pldt
= mdp
->md_ldt
;
712 mtx_assert(&dt_lock
, MA_OWNED
);
716 &((union descriptor
*)(pldt
->ldt_base
))[start
],
717 num
* sizeof(union descriptor
));
/*
 * Grow the process LDT so it holds at least `len' descriptors.  If a new
 * proc_ldt must be allocated, its descriptor/base/len are swapped into
 * the existing pldt (or installed as the first one), the old backing
 * store is freed, and all CPUs are told to reload their LDT — via
 * smp_rendezvous(set_user_ldt_rv) under SMP, or a direct set_user_ldt()
 * otherwise.  Called with dt_lock held; the lock is dropped and re-taken
 * around the blocking allocation, the rendezvous, and the free.
 * NOTE(review): the `len' clamp/early-success path, allocation-failure
 * handling, and return are missing from this extraction.
 */
722 i386_ldt_grow(struct thread
*td
, int len
)
724 struct mdproc
*mdp
= &td
->td_proc
->p_md
;
725 struct proc_ldt
*new_ldt
, *pldt
;
726 caddr_t old_ldt_base
= NULL_LDT_BASE
;
729 mtx_assert(&dt_lock
, MA_OWNED
);
736 /* Allocate a user ldt. */
737 if ((pldt
= mdp
->md_ldt
) == NULL
|| len
> pldt
->ldt_len
) {
738 new_ldt
= user_ldt_alloc(mdp
, len
);
/* Another thread may have grown the LDT while dt_lock was dropped. */
744 if (new_ldt
->ldt_len
<= pldt
->ldt_len
) {
746 * We just lost the race for allocation, so
747 * free the new object and return.
749 mtx_unlock_spin(&dt_lock
);
750 kmem_free(kernel_arena
,
751 (vm_offset_t
)new_ldt
->ldt_base
,
752 new_ldt
->ldt_len
* sizeof(union descriptor
));
753 free(new_ldt
, M_SUBPROC
);
754 mtx_lock_spin(&dt_lock
);
759 * We have to substitute the current LDT entry for
760 * curproc with the new one since its size grew.
762 old_ldt_base
= pldt
->ldt_base
;
763 old_ldt_len
= pldt
->ldt_len
;
764 pldt
->ldt_sd
= new_ldt
->ldt_sd
;
765 pldt
->ldt_base
= new_ldt
->ldt_base
;
766 pldt
->ldt_len
= new_ldt
->ldt_len
;
768 mdp
->md_ldt
= pldt
= new_ldt
;
771 * Signal other cpus to reload ldt. We need to unlock dt_lock
772 * here because other CPU will contest on it since their
773 * curthreads won't hold the lock and will block when trying
776 mtx_unlock_spin(&dt_lock
);
777 smp_rendezvous(NULL
, (void (*)(void *))set_user_ldt_rv
,
778 NULL
, td
->td_proc
->p_vmspace
);
780 set_user_ldt(&td
->td_proc
->p_md
);
781 mtx_unlock_spin(&dt_lock
);
/* Free the superseded backing store once no CPU can reference it. */
783 if (old_ldt_base
!= NULL_LDT_BASE
) {
784 kmem_free(kernel_arena
, (vm_offset_t
)old_ldt_base
,
785 old_ldt_len
* sizeof(union descriptor
));
786 free(new_ldt
, M_SUBPROC
);
788 mtx_lock_spin(&dt_lock
);