x86: spinlock_32/64 substitute types and instructions
[linux-2.6/mini2440.git] include/asm-x86/processor_32.h

/*
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/processor-flags.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
	unsigned long a, b;
};

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
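
/*
 * Illustrative use (not part of the original header): the macro can tag
 * diagnostics with the approximate point of execution, e.g.:
 *
 *	void *pc = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", pc);
 */
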
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
	int	x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id;		/* Physical processor id. */
	__u8 cpu_core_id;		/* Core id */
	__u8 cpu_index;			/* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

/*
 * the following now lives in the per cpu area:
 * extern	int cpu_llc_id[NR_CPUS];
 */
DECLARE_PER_CPU(u8, cpu_llc_id);
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
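
/*
 * Usage sketch (not part of the original header; the helper name is
 * illustrative): setting a CR4 feature bit through set_in_cr4() also
 * records it in mmu_cr4_features, so CPUs that boot later pick it up.
 * X86_CR4_OSFXSR comes from <asm/processor-flags.h>, included above.
 */
static inline void example_enable_fxsr(void)
{
	/* allow FXSAVE/FXRSTOR and record the bit for later-booting CPUs */
	set_in_cr4(X86_CR4_OSFXSR);
}
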
/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

extern void hard_disable_TSC(void);
extern void disable_TSC(void);
extern void hard_enable_TSC(void);

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET	offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000

struct i387_fsave_struct {
	long	cwd;
	long	swd;
	long	twd;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	unsigned short	cwd;
	unsigned short	swd;
	unsigned short	twd;
	unsigned short	fop;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	mxcsr;
	long	mxcsr_mask;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	cwd;
	long	swd;
	long	twd;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct;

/* This is the TSS defined by the hardware. */
struct i386_hw_tss {
	unsigned short	back_link, __blh;
	unsigned long	esp0;
	unsigned short	ss0, __ss0h;
	unsigned long	esp1;
	unsigned short	ss1, __ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long	esp2;
	unsigned short	ss2, __ss2h;
	unsigned long	__cr3;
	unsigned long	eip;
	unsigned long	eflags;
	unsigned long	eax, ecx, edx, ebx;
	unsigned long	esp;
	unsigned long	ebp;
	unsigned long	esi;
	unsigned long	edi;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
} __attribute__((packed));

struct tss_struct {
	struct i386_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	esp0;
	unsigned long	sysenter_cs;
	unsigned long	eip;
	unsigned long	esp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];	/* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long	screen_bitmap;
	unsigned long	v86flags, v86mask, saved_esp0;
	unsigned int	saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
	unsigned long	iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
};

#define INIT_THREAD  { \
	.esp0 = sizeof(init_stack) + (long)&init_stack, \
	.vm86_info = NULL, \
	.sysenter_cs = __KERNEL_CS, \
	.io_bitmap_ptr = NULL, \
	.fs = __KERNEL_PERCPU, \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  { \
	.x86_tss = { \
		.esp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS, \
		.ss1		= __KERNEL_CS, \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET, \
	 }, \
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
}

#define start_thread(regs, new_eip, new_esp) do { \
	__asm__("movl %0,%%gs": :"r" (0)); \
	regs->xfs = 0; \
	set_fs(USER_DS); \
	regs->xds = __USER_DS; \
	regs->xes = __USER_DS; \
	regs->xss = __USER_DS; \
	regs->xcs = __USER_CS; \
	regs->eip = new_eip; \
	regs->esp = new_esp; \
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
	unsigned long *__ptr = (unsigned long *)(info); \
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({ \
	struct pt_regs *__regs__; \
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1; \
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)
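
/*
 * Illustrative sketch (not from the original header): from code that
 * includes <linux/sched.h> (which supplies task_stack_page()), the saved
 * user-mode register state of a task can be read through these accessors:
 *
 *	unsigned long ip = KSTK_EIP(tsk);
 *	unsigned long sp = KSTK_ESP(tsk);
 *
 * Per the caveat above, the xss/esp fields may hold stale values when the
 * task trapped from the same privilege ring.
 */
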
struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
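
/*
 * Usage sketch (not part of the original header; the helper name is
 * illustrative): cpu_relax() belongs in the body of any busy-wait loop.
 * On P4 and later the PAUSE instruction saves power and frees pipeline
 * resources for the sibling hyperthread; on older CPUs it decodes as a
 * plain NOP.
 */
static inline void example_spin_until_set(volatile unsigned long *flag)
{
	while (!*flag)
		cpu_relax();	/* rep;nop - polite busy wait */
}
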
static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("movl %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("movl %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("movl %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("movl %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("movl %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("movl %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0
#define __cpuid native_cpuid

static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	native_load_esp0(tss, thread);
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register) \
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register) \
	native_set_debugreg(register, value)

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
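
/*
 * Usage sketch (not part of the original header; the helper name is
 * illustrative): reading a debug register through the accessor macro,
 * which resolves to the native function above or to the paravirt op,
 * depending on the configuration.
 */
static inline unsigned long example_read_dr6(void)
{
	unsigned long dr6;
	get_debugreg(dr6, 6);	/* DR6: debug status (which breakpoint hit) */
	return dr6;
}
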
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
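
/*
 * Usage sketch (not part of the original header; the helper name is
 * illustrative): testing a raw feature bit with the single-datum
 * wrappers. Bit 25 of CPUID.1:EDX is the SSE flag; in-kernel code would
 * normally use cpu_has()/boot_cpu_has() from <asm/cpufeature.h> instead
 * of open-coding the bit position.
 */
static inline int example_cpu_has_sse(void)
{
	return (cpuid_edx(1) >> 25) & 1;
}
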
/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1

/* P6 nops */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1	GENERIC_NOP1
#define P6_NOP2	".byte 0x66,0x90\n"
#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
      defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons.
   That should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)
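
/*
 * Usage sketch (not part of the original header; the type and names are
 * illustrative): prefetching the next node for write while modifying the
 * current one hides cache-miss latency and, on 3dnow! parts, pulls the
 * line in exclusive state so the later store avoids an extra coherency
 * transition.
 */
struct example_node {
	struct example_node *next;
	unsigned long data;
};

static inline void example_touch_all(struct example_node *n)
{
	while (n) {
		prefetchw(n->next);	/* harmless even if next is NULL */
		n->data++;
		n = n->next;
	}
}
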
extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct Xgt_desc_struct early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

extern int force_mwait;

#endif /* __ASM_I386_PROCESSOR_H */