/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *		 stack - Manfred Spraul <manfreds@colorfullife.com>
 *
 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *		 them correctly. Now the emulation will be in a
 *		 consistent state after stackfaults - Kasper Dupont
 *		 <kasperd@daimi.au.dk>
 *
 * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *		 <kasperd@daimi.au.dk>
 *
 * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *		 caused by Kasper Dupont's changes - Stas Sergeev
 *
 *  4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *		 Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *		 Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed stack access macros to jump to a label
 *		 instead of returning to userspace. This simplifies
 *		 do_int, and is needed by handle_vm86_fault. Kasper
 *		 Dupont <kasperd@daimi.au.dk>
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
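
/*
 * To illustrate the first caveat above (an illustrative sketch, not
 * taken from the original source): real-mode code often switches
 * stacks with
 *
 *	mov ss, ax	; real CPU blocks interrupts for one insn here
 *	mov sp, bx	; ...so this pair is effectively atomic
 *
 * relying on the implicit one-instruction interrupt shadow instead of
 * 'lss'. Under v86 emulation, a page fault or external interrupt can
 * land between the two moves, so the guest may briefly run on a
 * half-switched stack.
 */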
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))
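
/*
 * Example (assuming the little-endian byte order of x86): if
 * (regs)->eax holds 0x12345678, then AL(regs) reads 0x78 and AH(regs)
 * reads 0x56; writing AH(regs) = 0xAB leaves eax as 0x1234AB78.
 */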
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
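
/*
 * Worked example of set_flags() with the masks above: with
 * X = 0x0202 (IF set) and new = 0x0001 (CF set),
 *
 *	set_flags(X, new, SAFE_MASK);
 *	// X = (0x0202 & ~0xDD5) | (0x0001 & 0xDD5) = 0x0203
 *
 * CF is copied in from 'new' because bit 0 is in SAFE_MASK, while IF
 * (bit 9) is outside SAFE_MASK and is therefore kept from X. This is
 * how untrusted vm86 flag updates are confined to the "safe" bits.
 */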
#define VM86_REGS_PART2 orig_eax
#define VM86_REGS_SIZE1 \
	( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
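
/*
 * VM86_REGS_SIZE1 is the classic null-pointer form of offsetof(); it
 * evaluates to the same value as
 *
 *	offsetof(struct kernel_vm86_regs, VM86_REGS_PART2)
 *
 * so the register frame is copied to/from userspace in two pieces,
 * split at orig_eax.
 */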
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_to_user(&current->thread.vm86_info->regs, regs, VM86_REGS_SIZE1);
	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
	put_cpu();

	loadsegment(fs, current->thread.saved_fs);
	loadsegment(gs, current->thread.saved_gs);

	ret = KVM86->regs32;
	return ret;
}
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}
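
/*
 * Coverage check for the loop above: 32 PTEs of 4 KiB each
 * write-protect 32 * 4096 = 0x20000 bytes from 0xA0000 up to (but not
 * including) 0xC0000, i.e. exactly the legacy VGA memory window used
 * for the screen bitmap.
 */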
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.ebx) {
		case VM86_REQUEST_IRQ:
		case VM86_FREE_IRQ:
		case VM86_GET_IRQ_BITS:
		case VM86_GET_AND_RESET_IRQ:
			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
			goto out;
		case VM86_PLUS_INSTALL_CHECK:
			/* NOTE: on old vm86 stuff this will return the error
			   from access_ok(), because the subfunction is
			   interpreted as (invalid) address to vm86_struct.
			   So the installation check works.
			   (A hedged userspace sketch follows this function.)
			 */
			ret = 0;
			goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
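
/*
 * Hedged userspace sketch of the installation check described in the
 * NOTE above (illustrative only; the direct syscall() invocation and
 * the header set are assumptions, not taken from this file):
 *
 *	#include <sys/syscall.h>
 *	#include <sys/vm86.h>
 *	#include <unistd.h>
 *
 *	static int have_vm86plus(void)
 *	{
 *		// A vm86plus kernel returns 0 here; an old-style kernel
 *		// interprets the subfunction code as a (bad) vm86_struct
 *		// pointer and the call fails with EFAULT instead.
 *		return syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) == 0;
 *	}
 */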
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.__null_ds = 0;
	info->regs.__null_es = 0;

/* we are clearing fs,gs later just before "jmp resume_userspace",
 * because starting with Linux 2.1.x they are no longer saved/restored
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.eflags;
	info->regs.eflags &= SAFE_MASK;
	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.eflags |= VM_MASK;

	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	savesegment(gs, tsk->thread.saved_gs);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);
	__asm__ __volatile__(
		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (tsk->thread_info) : "ax");
	/* we never return here */
}
static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}
static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~AC_MASK;
}
/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */
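
/*
 * Worked trace of the bug described above, before the clear_IF fix:
 *
 *	cli	-> clear_IF(): VIF cleared
 *	pushf	-> get_vflags(): pushed image has IF clear
 *	sti	-> set_IF(): VIF set
 *	popf	-> set_vflags_*() with IF clear in the popped image;
 *		   without the clear_IF() call, VIF stayed set, so the
 *		   guest ended up with interrupts enabled instead of
 *		   disabled.
 */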
static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { __u8 __val = val; ptr--; \
	     if (put_user(__val, base + ptr) < 0) goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { __u16 __val = val; \
	     ptr--; if (put_user(val_byte(__val, 1), base + ptr) < 0) goto err_label; \
	     ptr--; if (put_user(val_byte(__val, 0), base + ptr) < 0) goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { __u32 __val = val; \
	     ptr--; if (put_user(val_byte(__val, 3), base + ptr) < 0) goto err_label; \
	     ptr--; if (put_user(val_byte(__val, 2), base + ptr) < 0) goto err_label; \
	     ptr--; if (put_user(val_byte(__val, 1), base + ptr) < 0) goto err_label; \
	     ptr--; if (put_user(val_byte(__val, 0), base + ptr) < 0) goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ __u8 __res; \
	   if (get_user(__res, base + ptr) < 0) goto err_label; \
	   ptr++; \
	   __res; })

#define popw(base, ptr, err_label) \
	({ __u16 __res; \
	   if (get_user(val_byte(__res, 0), base + ptr) < 0) goto err_label; ptr++; \
	   if (get_user(val_byte(__res, 1), base + ptr) < 0) goto err_label; ptr++; \
	   __res; })

#define popl(base, ptr, err_label) \
	({ __u32 __res; \
	   if (get_user(val_byte(__res, 0), base + ptr) < 0) goto err_label; ptr++; \
	   if (get_user(val_byte(__res, 1), base + ptr) < 0) goto err_label; ptr++; \
	   if (get_user(val_byte(__res, 2), base + ptr) < 0) goto err_label; ptr++; \
	   if (get_user(val_byte(__res, 3), base + ptr) < 0) goto err_label; ptr++; \
	   __res; })
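
/*
 * A note on the byte-at-a-time accesses above (inferred from the
 * 16-bit pointer arithmetic, not stated in the original): 'ptr' is a
 * 16-bit value, so each increment/decrement wraps modulo 0x10000 and
 * the emulated stack wraps within its 64K segment exactly as on a
 * real CPU, even when a 16- or 32-bit value straddles the
 * 0xFFFF/0x0000 boundary.
 */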
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
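
/*
 * Example of the vector lookup above: the real-mode IVT starts at
 * linear address 0 with four bytes per vector, so for INT 0x21
 * intr_ptr is 0x21 << 2 = 0x84. The low word of the fetched entry
 * becomes the new IP and the high word (segoffs >> 16) the new CS.
 */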
int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
		return 0;
	}
	if (trapno !=1)
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->eflags;

	csp = (unsigned char __user *) (regs->cs << 4);
	ssp = (unsigned char __user *) (regs->ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:	/* 32-bit data */	data32=1; break;
			case 0x67:	/* 32-bit address */	break;
			case 0x2e:	/* CS */		break;
			case 0x3e:	/* DS */		break;
			case 0x26:	/* ES */		break;
			case 0x36:	/* SS */		break;
			case 0x65:	/* GS */		break;
			case 0x64:	/* FS */		break;
			case 0xf2:	/* repnz */		break;
			case 0xf3:	/* rep */		break;
			default: pref_done = 1;
		}
	} while (!pref_done);
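
	/*
	 * Example of the prefix decoding above: for the byte sequence
	 * 66 9c (operand-size prefix + pushf), the loop consumes 0x66
	 * and sets data32, then exits with opcode == 0x9c, so the
	 * switch below emulates a 32-bit pushfd.
	 */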
	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);
		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
	| (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;

	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;

	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}