x86: get rid of _MASK flags
[linux-2.6/linux-loongson.git] include/asm-x86/processor_32.h
/*
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/desc_defs.h>

static inline int desc_empty(const void *ptr)
{
        const u32 *desc = ptr;
        return !(desc[0] | desc[1]);
}

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
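
/*
 * Illustrative only (not in the original header): the macro can be used
 * wherever a code address is wanted, e.g. in a debug printk:
 *
 *      void *here = current_text_addr();
 *      printk(KERN_DEBUG "executing near %p\n", here);
 */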

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8 x86;               /* CPU family */
        __u8 x86_vendor;        /* CPU vendor */
        __u8 x86_model;
        __u8 x86_mask;
        char wp_works_ok;       /* It doesn't on 386's */
        char hlt_works_ok;      /* Problems on some 486Dx4's and old 386's */
        char hard_math;
        char rfu;
        int cpuid_level;        /* Maximum supported CPUID level, -1=no CPUID */
        unsigned long x86_capability[NCAPINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        int x86_cache_size;     /* in KB - valid for CPUs which support this call */
        int x86_cache_alignment; /* In bytes */
        char fdiv_bug;
        char f00f_bug;
        char coma_bug;
        char pad0;
        int x86_power;
        unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
        cpumask_t llc_shared_map;       /* cpus sharing the last level cache */
#endif
        unsigned char x86_max_cores;    /* cpuid returned max cores value */
        unsigned char apicid;
        unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
        unsigned char booted_cores;     /* number of cores as seen by OS */
        __u8 phys_proc_id;              /* Physical processor id. */
        __u8 cpu_core_id;               /* Core id */
        __u8 cpu_index;                 /* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_NEXGEN       4
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_NUM          9
#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#define current_cpu_data        cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)           boot_cpu_data
#define current_cpu_data        boot_cpu_data
#endif

/*
 * the following now lives in the per cpu area:
 * extern int cpu_llc_id[NR_CPUS];
 */
DECLARE_PER_CPU(u8, cpu_llc_id);
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned cr4;
        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned cr4;
        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}
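
/*
 * Illustrative only (not in the original header): callers pass an
 * X86_CR4_* feature bit. mmu_cr4_features is updated first, so CPUs
 * brought up later inherit the same CR4 state:
 *
 *      set_in_cr4(X86_CR4_PGE);        enable global pages
 *      clear_in_cr4(X86_CR4_PGE);      and disable them again
 */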

/* Stop speculative execution */
static inline void sync_core(void)
{
        int tmp;
        asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc8;"
                : :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(
                ".byte 0x0f,0x01,0xc9;"
                : :"a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
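
/*
 * Illustrative only (not in the original header): the usual pattern is
 * to arm the monitor on a memory location and then mwait until it is
 * written (or an interrupt arrives), roughly:
 *
 *      __monitor(&flag, 0, 0);         arm the address-range monitor
 *      if (!flag)
 *              __mwait(0, 0);          sleep until flag is written
 *
 * The instructions are emitted as raw bytes above because older
 * assemblers do not know the monitor/mwait mnemonics.
 */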

/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE       (PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))
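
/*
 * Illustrative only (not in the original header): with the default 3GB
 * split, PAGE_OFFSET is 0xC0000000, so TASK_SIZE / 3 = 0x40000000 and
 * mmap searches start at user address 1GB (already page-aligned).
 */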

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS                  65536
#define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET                offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET        0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY   0x9000
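
/*
 * Illustrative only (not in the original header): one bit per I/O port
 * gives 65536 bits = 8192 bytes = 2048 longs on 32-bit. An
 * io_bitmap_base at or beyond the TSS limit (as with the two INVALID_*
 * offsets) disables the bitmap, so unprivileged port accesses fault.
 */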

struct i387_fsave_struct {
        long cwd;
        long swd;
        long twd;
        long fip;
        long fcs;
        long foo;
        long fos;
        long st_space[20];      /* 8*10 bytes for each FP-reg = 80 bytes */
        long status;            /* software status information */
};

struct i387_fxsave_struct {
        unsigned short cwd;
        unsigned short swd;
        unsigned short twd;
        unsigned short fop;
        long fip;
        long fcs;
        long foo;
        long fos;
        long mxcsr;
        long mxcsr_mask;
        long st_space[32];      /* 8*16 bytes for each FP-reg = 128 bytes */
        long xmm_space[32];     /* 8*16 bytes for each XMM-reg = 128 bytes */
        long padding[56];
} __attribute__((aligned(16)));

struct i387_soft_struct {
        long cwd;
        long swd;
        long twd;
        long fip;
        long fcs;
        long foo;
        long fos;
        long st_space[20];      /* 8*10 bytes for each FP-reg = 80 bytes */
        unsigned char ftop, changed, lookahead, no_update, rm, alimit;
        struct info *info;
        unsigned long entry_eip;
};

union i387_union {
        struct i387_fsave_struct fsave;
        struct i387_fxsave_struct fxsave;
        struct i387_soft_struct soft;
};

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct thread_struct;

/* This is the TSS defined by the hardware. */
struct i386_hw_tss {
        unsigned short  back_link,__blh;
        unsigned long   sp0;
        unsigned short  ss0,__ss0h;
        unsigned long   sp1;
        unsigned short  ss1,__ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
        unsigned long   sp2;
        unsigned short  ss2,__ss2h;
        unsigned long   __cr3;
        unsigned long   ip;
        unsigned long   flags;
        unsigned long   ax, cx, dx, bx;
        unsigned long   sp, bp, si, di;
        unsigned short  es, __esh;
        unsigned short  cs, __csh;
        unsigned short  ss, __ssh;
        unsigned short  ds, __dsh;
        unsigned short  fs, __fsh;
        unsigned short  gs, __gsh;
        unsigned short  ldt, __ldth;
        unsigned short  trace, io_bitmap_base;
} __attribute__((packed));

struct tss_struct {
        struct i386_hw_tss x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long   io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * Cache the current maximum and the last task that used the bitmap:
         */
        unsigned long io_bitmap_max;
        struct thread_struct *io_bitmap_owner;
        /*
         * pads the TSS to be cacheline-aligned (size is 0x100)
         */
        unsigned long __cacheline_filler[35];
        /*
         * .. and then another 0x100 bytes for emergency kernel stack
         */
        unsigned long stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN      16

struct thread_struct {
/* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long   sp0;
        unsigned long   sysenter_cs;
        unsigned long   ip;
        unsigned long   sp;
        unsigned long   fs;
        unsigned long   gs;
/* Hardware debugging registers */
        unsigned long   debugreg0;
        unsigned long   debugreg1;
        unsigned long   debugreg2;
        unsigned long   debugreg3;
        unsigned long   debugreg6;
        unsigned long   debugreg7;
/* fault info */
        unsigned long   cr2, trap_no, error_code;
/* floating point info */
        union i387_union        i387;
/* virtual 86 mode info */
        struct vm86_struct __user * vm86_info;
        unsigned long   screen_bitmap;
        unsigned long   v86flags, v86mask, saved_sp0;
        unsigned int    saved_fs, saved_gs;
/* IO permissions */
        unsigned long   *io_bitmap_ptr;
        unsigned long   iopl;
/* max allowed port in the bitmap, in bytes: */
        unsigned long   io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
        unsigned long   debugctlmsr;
/* Debug Store - if not 0 points to a DS Save Area configuration;
 * goes into MSR_IA32_DS_AREA */
        unsigned long   ds_area_msr;
};

#define INIT_THREAD  {                                          \
        .sp0 = sizeof(init_stack) + (long)&init_stack,          \
        .vm86_info = NULL,                                      \
        .sysenter_cs = __KERNEL_CS,                             \
        .io_bitmap_ptr = NULL,                                  \
        .fs = __KERNEL_PERCPU,                                  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                             \
        .x86_tss = {                                            \
                .sp0 = sizeof(init_stack) + (long)&init_stack,  \
                .ss0 = __KERNEL_DS,                             \
                .ss1 = __KERNEL_CS,                             \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,     \
        },                                                      \
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },          \
}
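
/*
 * Illustrative only (not in the original header): [0 ... N] is GCC's
 * range-designator syntax; it fills entries 0 through IO_BITMAP_LONGS
 * inclusive (i.e. IO_BITMAP_LONGS + 1 longs) with all-ones, covering
 * the extra terminating byte required beyond the bitmap proper.
 */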

#define start_thread(regs, new_eip, new_esp) do {               \
        __asm__("movl %0,%%gs": :"r" (0));                      \
        regs->fs = 0;                                           \
        set_fs(USER_DS);                                        \
        regs->ds = __USER_DS;                                   \
        regs->es = __USER_DS;                                   \
        regs->ss = __USER_DS;                                   \
        regs->cs = __USER_CS;                                   \
        regs->ip = new_eip;                                     \
        regs->sp = new_esp;                                     \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
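
/*
 * Illustrative only (not in the original header): fn runs in a new
 * kernel thread and its return value becomes the exit code, e.g.:
 *
 *      static int my_worker(void *data)
 *      {
 *              do_something(data);
 *              return 0;
 *      }
 *
 *      pid = kernel_thread(my_worker, &ctx, CLONE_FS | CLONE_FILES);
 *
 * my_worker, do_something and ctx are hypothetical names.
 */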

extern unsigned long thread_saved_pc(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                          \
({                                                              \
        unsigned long *__ptr = (unsigned long *)(info);         \
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);             \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                      \
({                                                              \
        struct pt_regs *__regs__;                               \
        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1;                                           \
})

#define KSTK_EIP(task) (task_pt_regs(task)->ip)
#define KSTK_ESP(task) (task_pt_regs(task)->sp)

struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()     rep_nop()
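
/*
 * Illustrative only (not in the original header): the canonical use is
 * a busy-wait loop, where PAUSE saves power and gives the sibling
 * hyperthread a chance to run:
 *
 *      while (!*(volatile int *)&flag)
 *              cpu_relax();
 */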

static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;
        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
}

static inline unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0;  /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("movl %%db0, %0" :"=r" (val)); break;
        case 1:
                asm("movl %%db1, %0" :"=r" (val)); break;
        case 2:
                asm("movl %%db2, %0" :"=r" (val)); break;
        case 3:
                asm("movl %%db3, %0" :"=r" (val)); break;
        case 6:
                asm("movl %%db6, %0" :"=r" (val)); break;
        case 7:
                asm("movl %%db7, %0" :"=r" (val)); break;
        default:
                BUG();
        }
        return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("movl %0,%%db0"     : /* no output */ :"r" (value));
                break;
        case 1:
                asm("movl %0,%%db1"     : /* no output */ :"r" (value));
                break;
        case 2:
                asm("movl %0,%%db2"     : /* no output */ :"r" (value));
                break;
        case 3:
                asm("movl %0,%%db3"     : /* no output */ :"r" (value));
                break;
        case 6:
                asm("movl %0,%%db6"     : /* no output */ :"r" (value));
                break;
        case 7:
                asm("movl %0,%%db7"     : /* no output */ :"r" (value));
                break;
        default:
                BUG();
        }
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
        unsigned int reg;
        __asm__ __volatile__ ("pushfl;"
                              "popl %0;"
                              "andl %1, %0;"
                              "orl %2, %0;"
                              "pushl %0;"
                              "popfl"
                                : "=&r" (reg)
                                : "i" (~X86_EFLAGS_IOPL), "r" (mask));
}
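
/*
 * Illustrative only (not in the original header): the asm above is the
 * read-modify-write sequence
 *
 *      eflags = (eflags & ~X86_EFLAGS_IOPL) | mask;
 *
 * done via pushfl/popfl because EFLAGS has no direct mov encoding.
 */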

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0

static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
        native_load_sp0(tss, thread);
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)                             \
        (var) = native_get_debugreg(register)
#define set_debugreg(value, register)                           \
        native_set_debugreg(register, value)
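
/*
 * Illustrative only (not in the original header): note the asymmetric
 * argument order of the two wrappers, e.g. to read DR6 and then clear
 * the hardware copy:
 *
 *      unsigned long dr6;
 *      get_debugreg(dr6, 6);
 *      set_debugreg(0, 6);
 */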

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* generic versions from gas */
#define GENERIC_NOP1    ".byte 0x90\n"
#define GENERIC_NOP2    ".byte 0x89,0xf6\n"
#define GENERIC_NOP3    ".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4    ".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5    GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6    ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7    ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8    GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8 K7_NOP7 ASM_NOP1

/* P6 nops */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1 GENERIC_NOP1
#define P6_NOP2 ".byte 0x66,0x90\n"
#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
      defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8
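
/*
 * Illustrative only (not in the original header): the ASM_NOPx strings
 * are multi-byte instructions that do nothing; they pad alternatives
 * and can also be emitted directly, e.g.:
 *
 *      asm volatile(ASM_NOP4);         one 4-byte nop
 *
 * A single long nop is generally cheaper to decode than an equivalent
 * run of 1-byte 0x90 nops.
 */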

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we don't currently do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
        alternative_input(ASM_NOP4,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}
#define spin_lock_prefetch(x)   prefetchw(x)
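
/*
 * Illustrative only (not in the original header): prefetch() pulls a
 * cache line in for reading, prefetchw() for writing, e.g. while
 * walking a linked list:
 *
 *      for (p = head; p; p = p->next)
 *              prefetch(p->next);      hide the miss on the next node
 */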

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

extern int force_mwait;

#endif /* __ASM_I386_PROCESSOR_H */