/*
 *  Copyright (C) 1994  Linus Torvalds
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
/*
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)
#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
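/*
 * SAFE_MASK covers the flag bits a vm86 task may modify directly
 * (CF, PF, AF, ZF, SF, TF, DF, OF); RETURN_MASK additionally keeps
 * the reserved low bits 1, 3 and 5.  Both deliberately leave out
 * IF (bit 9): the interrupt flag the task sees is virtualized via
 * the VIF bit in VEFLAGS, and IOPL/NT/AC/ID are granted per
 * cpu_type through thread.v86mask below.
 */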
#define VM86_REGS_PART2	orig_eax
#define VM86_REGS_SIZE1 \
        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2	(sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
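/*
 * The register image is copied to and from user space in two chunks,
 * split at VM86_REGS_PART2 (orig_eax): SIZE1 is the offset of orig_eax
 * inside struct kernel_vm86_regs, SIZE2 the remainder.  The split is
 * needed because the kernel and user register structures do not line
 * up as one flat block.
 */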
asmlinkage struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
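/*
 * Leave vm86 mode: copy the vm86 register image and the screen bitmap
 * back into the user's vm86_info, restore the saved ring-0 stack
 * pointer in the TSS, and return the saved 32-bit pt_regs so the
 * caller can resume normal protected-mode execution.
 */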
struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	unsigned long tmp;

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp  = copy_to_user(&current->thread.vm86_info->regs, regs, VM86_REGS_SIZE1);
	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}
	tss = init_tss + smp_processor_id();
	tss->esp0 = current->thread.esp0 = current->thread.saved_esp0;
	current->thread.saved_esp0 = 0;
	return KVM86->regs32;
}
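/*
 * Write-protect the 32 pages mapping the legacy video window at
 * 0xA0000 so that stores to the screen fault and can be tracked;
 * used when the caller requested VM86_SCREEN_BITMAP.
 */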
static void mark_screen_rdonly(struct task_struct * tsk)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	pgd = pgd_offset(tsk->mm, 0xA0000);
	if (pgd_none(*pgd))
		return;
	pmd = pmd_offset(pgd, 0xA0000);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset(pmd, 0xA0000);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	flush_tlb();
}
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
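/*
 * Old-style entry point: takes a plain struct vm86_struct, zeroes the
 * vm86plus extensions and switches to vm86 mode.  The call does not
 * return here; control comes back through save_v86_state() later.
 */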
asmlinkage int sys_vm86old(struct vm86_struct * v86)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = (struct pt_regs *) &v86;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
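/*
 * vm86plus entry point: the subfunction selects either one of the IRQ
 * passing helpers below or an actual switch into vm86 mode.  Userspace
 * can probe for this interface with VM86_PLUS_INSTALL_CHECK, which
 * simply succeeds here while old kernels treat the argument as a bad
 * vm86_struct pointer and fail.
 */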
asmlinkage int sys_vm86(unsigned long subfunction, struct vm86plus_struct * v86)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;

	tsk = current;
	switch (subfunction) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(subfunction, (int)v86);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/* NOTE: on old vm86 stuff this will return the error
		   from verify_area(), because the subfunction is
		   interpreted as (invalid) address to vm86_struct.
		   So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = (struct pt_regs *) &subfunction;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
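/*
 * Common worker for both entry points: sanitize the user-supplied
 * eflags (IF and friends are taken from protected mode and kept
 * virtual), pick the flag bits the task may control from cpu_type,
 * save esp0 and point the TSS ring-0 stack at the on-stack
 * kernel_vm86_struct, then jump to ret_from_sys_call with the vm86
 * register frame so the iret lands in vm86 mode.
 */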
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.__null_ds = 0;
	info->regs.__null_es = 0;

/* we are clearing fs,gs later just before "jmp ret_from_sys_call",
 * because starting with Linux 2.1.x they are no longer saved/restored
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.eflags;
	info->regs.eflags &= SAFE_MASK;
	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.eflags |= VM_MASK;

	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	tss = init_tss + smp_processor_id();
	tss->esp0 = tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk);
	__asm__ __volatile__(
		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
		"movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		: /* no outputs */
		:"r" (&info->regs), "b" (tsk) : "ax");
	/* we never return here */
}
static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		: : "r" (regs32), "b" (current));
}
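/*
 * The task's view of the interrupt flag never reaches the real eflags:
 * it lives in the VIF bit of VEFLAGS.  set_IF() also checks VIP and, if
 * the monitor has flagged a pending interrupt, bounces straight back to
 * 32-bit mode with VM86_STI.
 */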
static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~TF_MASK;
}
static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
}
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
}
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}
/*
 * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
 * Gcc makes a mess of it, so we do it inline and use non-obvious calling
 * conventions..
 */
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %2,0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %h2,0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb 0(%1,%0),%b2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb 0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb 0(%1,%0),%h2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb 0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb 0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb 0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb 0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base)); \
__res; })
static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
	unsigned long *intr_ptr, segoffs;

	if (regs->cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs));
	pushw(ssp, sp, regs->cs);
	pushw(ssp, sp, IP(regs));
	regs->cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char *) (regs->ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sigmask_lock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char *csp, *ssp;
	unsigned long ip, sp;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		pushw(ssp,sp,popw(ssp,sp) | TF_MASK);
#define VM86_FAULT_RETURN \
	if (VMPI.force_return_for_pic && (VEFLAGS & IF_MASK)) \
		return_to_32bit(regs, VM86_PICRETURN); \
	return;

	csp = (unsigned char *) (regs->cs << 4);
	ssp = (unsigned char *) (regs->ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	switch (popb(csp, ip)) {

	/* operand size override */
	case 0x66:
		switch (popb(csp, ip)) {
		/* pushfd */
		case 0x9c:
			SP(regs) -= 4;
			IP(regs) += 2;
			pushl(ssp, sp, get_vflags(regs));
			VM86_FAULT_RETURN;
		/* popfd */
		case 0x9d:
			SP(regs) += 4;
			IP(regs) += 2;
			CHECK_IF_IN_TRAP
			set_vflags_long(popl(ssp, sp), regs);
			VM86_FAULT_RETURN;
		/* iretd */
		case 0xcf:
			SP(regs) += 12;
			IP(regs) = (unsigned short)popl(ssp, sp);
			regs->cs = (unsigned short)popl(ssp, sp);
			CHECK_IF_IN_TRAP
			set_vflags_long(popl(ssp, sp), regs);
			VM86_FAULT_RETURN;
		/* need this to avoid a fallthrough */
		default:
			return_to_32bit(regs, VM86_UNKNOWN);
		}

	/* pushf */
	case 0x9c:
		SP(regs) -= 2;
		IP(regs)++;
		pushw(ssp, sp, get_vflags(regs));
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		SP(regs) += 2;
		IP(regs)++;
		CHECK_IF_IN_TRAP
		set_vflags_short(popw(ssp, sp), regs);
		VM86_FAULT_RETURN;

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip);
		IP(regs) += 2;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		SP(regs) += 6;
		IP(regs) = popw(ssp, sp);
		regs->cs = popw(ssp, sp);
		CHECK_IF_IN_TRAP
		set_vflags_short(popw(ssp, sp), regs);
		VM86_FAULT_RETURN;

	/* cli */
	case 0xfa:
		IP(regs)++;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs)++;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) )
static void irq_handler(int intno, void *dev_id, struct pt_regs * regs) {
	int irq_bit;
	unsigned long flags;

	save_flags(flags);
	cli();
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/* else user will poll for IRQs */
out:
	restore_flags(flags);
}
static inline void free_vm86_irq(int irqnumber)
{
	free_irq(irqnumber, 0);
	vm86_irqs[irqnumber].tsk = 0;
	irqbits &= ~(1 << irqnumber);
}
static inline int task_valid(struct task_struct *tsk)
{
	struct task_struct *p;
	int ret = 0;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if ((p == tsk) && (p->sig)) {
			ret = 1;
			break;
		}
	}
	read_unlock(&tasklist_lock);
	return ret;
}
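/*
 * task_valid() checks under tasklist_lock that the registered task
 * still exists; handle_irq_zombies() uses it to release IRQ lines
 * whose owner has gone away.
 */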
static inline void handle_irq_zombies(void)
{
	int i;

	for (i = 3; i < 16; i++) {
		if (vm86_irqs[i].tsk) {
			if (task_valid(vm86_irqs[i].tsk)) continue;
			free_vm86_irq(i);
		}
	}
}
static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;

	if ( (irqnumber < 3) || (irqnumber > 15) ) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	save_flags(flags);
	cli();
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	restore_flags(flags);
	return bit;
}
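/*
 * Dispatcher for the IRQ passing subfunctions.  For VM86_REQUEST_IRQ
 * the argument packs the signal number in the high byte and the IRQ
 * number in the low byte; the other subfunctions let the caller poll
 * (VM86_GET_IRQ_BITS, VM86_GET_AND_RESET_IRQ) or release the line
 * (VM86_FREE_IRQ).
 */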
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;

	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			handle_irq_zombies();
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if ( (irq < 3) || (irq > 15) ) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, 0);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			handle_irq_zombies();
			if ( (irqnumber < 3) || (irqnumber > 15) ) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);