[AROS.git] arch/all-pc/kernel/x86_syscalls.h
#ifndef X86_SYSCALLS_H
#define X86_SYSCALLS_H

/*
 * These are x86-specific/private SysCalls that
 * the kernel modules may provide/support.
 */

#define SC_X86CHANGEPMSTATE     0xFF
#define SC_X86CPUWAKE           0xFE
#define SC_X86CPUSPINLOCK       0xFD
#define SC_X86SWITCH            0xFC
#define SC_X86RESCHEDTASK       0xFB
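
/*
 * Each wrapper below raises software interrupt 0x80 with the SysCall ID
 * in EAX; arguments, where present, go in EBX and ECX, and any result is
 * returned in EAX. The "memory" clobber stops the compiler from caching
 * memory values across the trap.
 */

/* Request a task switch (no arguments, no result). */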
#define krnSysCallSwitch() \
    ({ \
        __asm__ __volatile__ ("int $0x80"::"a"(SC_X86SWITCH):"memory"); \
    })
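
/* Reschedule the given task with the given state (task in EBX, state in ECX). */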
#define krnSysCallReschedTask(task, state) \
    ({ \
        __asm__ __volatile__ ("int $0x80"::"a"(SC_X86RESCHEDTASK),"b"(task),"c"(state):"memory"); \
    })
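
/* Acquire a kernel spinlock; spindata is passed in EBX and the resulting
   spinlock_t pointer is returned in EAX. Note that spinlock_t must be
   declared before this header is used. */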
#define krnSysCallSpinLock(spindata) \
    ({ \
        spinlock_t *__value; \
        __asm__ __volatile__ ("int $0x80":"=a"(__value):"a"(SC_X86CPUSPINLOCK),"b"(spindata):"memory"); \
        __value; \
    })
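
/* Wake the CPU described by wakedata (passed in EBX); an int status is
   returned in EAX. */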
#define krnSysCallCPUWake(wakedata) \
    ({ \
        int __value; \
        __asm__ __volatile__ ("int $0x80":"=a"(__value):"a"(SC_X86CPUWAKE),"b"(wakedata):"memory"); \
        __value; \
    })
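
/* Request a power-management state change (state in EBX); an int status
   is returned in EAX. */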
#define krnSysCallChangePMState(state) \
    ({ \
        int __value; \
        __asm__ __volatile__ ("int $0x80":"=a"(__value):"a"(SC_X86CHANGEPMSTATE),"b"(state):"memory"); \
        __value; \
    })
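
/*
 * Usage sketch (hypothetical caller; "task" and TS_READY are assumed to
 * come from the surrounding scheduler code, not from this header):
 *
 *     krnSysCallReschedTask(task, TS_READY);
 *     krnSysCallSwitch();
 */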

#endif /* !X86_SYSCALLS_H */