/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/voyager.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
#include <asm/trampoline.h>

/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
    {[0 ... NR_CPUS-1] = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;

/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);
static void initialize_secondary(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);

/* Inline functions */
static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
        voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
            (smp_processor_id() << 16) + cpi;
}

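/* Illustrative note, not taken from the original source: the word
 * written to the quad's memory-mapped CPI mailbox carries the sender
 * in the top half and the CPI in the bottom half, so CPU 2 raising
 * CPI 3 on a target quad would store (2 << 16) + 3 == 0x00020003 in
 * qic_cpi[3].cpi. */
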
static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpuset & (1 << cpu)) {
                        if (!cpu_online(cpu))
                                VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
                                        "cpu_online_map\n",
                                        hard_smp_processor_id(), cpi, cpu));
                        send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
                }
        }
}

static inline void wrapper_smp_local_timer_interrupt(void)
{
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
}

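/* Assumption behind the wrapper above: the broadcast timer CPI arrives
 * outside the normal genirq entry path, so the tick has to be
 * bracketed by irq_enter()/irq_exit() itself before it can safely run
 * update_process_times() and friends. */
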
static inline void send_one_CPI(__u8 cpu, __u8 cpi)
{
        if (voyager_quad_processors & (1 << cpu))
                send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
        else
                send_CPI(1 << cpu, cpi);
}

static inline void send_CPI_allbutself(__u8 cpi)
{
        __u8 cpu = smp_processor_id();
        __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
}

static inline int is_cpu_quad(void)
{
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int is_cpu_extended(void)
{
        __u8 cpu = hard_smp_processor_id();

        return (voyager_extended_vic_processors & (1 << cpu));
}

static inline int is_cpu_vic_boot(void)
{
        __u8 cpu = hard_smp_processor_id();

        return (voyager_extended_vic_processors
                & voyager_allowed_boot_processors & (1 << cpu));
}

static inline void ack_CPI(__u8 cpi)
{
        switch (cpi) {
        case VIC_CPU_BOOT_CPI:
                if (is_cpu_quad() && !is_cpu_vic_boot())
                        ack_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        case VIC_SYS_INT:
        case VIC_CMN_INT:
                /* These are slightly strange.  Even on the Quad card,
                 * they are vectored as VIC CPIs */
                if (is_cpu_quad())
                        ack_special_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        default:
                printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
                break;
        }
}

/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
        .startup = startup_vic_irq,
        .mask = mask_vic_irq,
        .unmask = unmask_vic_irq,
        .set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;

/* debugging routine to read the isr of the cpu's pic */
static inline __u16 vic_read_isr(void)
{
        __u16 isr;

        outb(0x0b, 0xa0);	/* OCW3: next read returns the slave ISR */
        isr = inb(0xa0) << 8;
        outb(0x0b, 0x20);	/* OCW3: next read returns the master ISR */
        isr |= inb(0x20);

        return isr;
}

static __init void qic_setup(void)
{
        if (!is_cpu_quad()) {
                /* not a quad, no setup */
                return;
        }
        outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
        outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

        if (is_cpu_extended()) {
                /* the QIC duplicate of the VIC base register */
                outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
                outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

                /* FIXME: should set up the QIC timer and memory parity
                 * error vectors here */
        }
}

static __init void vic_setup_pic(void)
{
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* clear the claim registers for dynamic routing */
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        outb(0, VIC_PRIORITY_REGISTER);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
        /* Now initialise the master PIC belonging to this CPU by
         * sending the four ICWs */

        /* ICW1: level triggered, ICW4 needed */
        outb(0x19, 0x20);

        /* ICW2: vector base */
        outb(FIRST_EXTERNAL_VECTOR, 0x21);

        /* ICW3: slave at line 2 */
        outb(0x04, 0x21);

        /* ICW4: 8086 mode */
        outb(0x01, 0x21);

        /* now the same for the slave PIC */

        /* ICW1: level trigger, ICW4 needed */
        outb(0x19, 0xA0);

        /* ICW2: slave vector base */
        outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

        /* ICW3: slave ID */
        outb(0x02, 0xA1);

        /* ICW4: 8086 mode */
        outb(0x01, 0xA1);
}

static void do_quad_bootstrap(void)
{
        if (is_cpu_quad() && is_cpu_vic_boot()) {
                int i;
                unsigned long flags;
                __u8 cpuid = hard_smp_processor_id();

                local_irq_save(flags);

                for (i = 0; i < 4; i++) {
                        /* FIXME: this would be >>3 &0x7 on the 32 way */
                        if (((cpuid >> 2) & 0x03) == i)
                                /* don't lower our own mask! */
                                continue;

                        /* masquerade as local Quad CPU */
                        outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
                        /* enable the startup CPI */
                        outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
                        /* restore cpu id */
                        outb(0, QIC_PROCESSOR_ID);
                }
                local_irq_restore(flags);
        }
}

void prefill_possible_map(void)
{
        /* This is empty on voyager because we need a much
         * earlier detection which is done in find_smp_config */
}

/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init find_smp_config(void)
{
        int i;

        boot_cpu_id = hard_smp_processor_id();

        printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

        /* initialize the CPU structures (moved from smp_boot_cpus) */
        for (i = 0; i < NR_CPUS; i++) {
                cpu_irq_affinity[i] = ~0;
        }
        cpu_online_map = cpumask_of_cpu(boot_cpu_id);

        /* The boot CPU must be extended */
        voyager_extended_vic_processors = 1 << boot_cpu_id;
        /* initially, all of the first 8 CPUs can boot */
        voyager_allowed_boot_processors = 0xff;
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
        cpus_addr(phys_cpu_present_map)[0] =
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       2) << 16;
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       3) << 24;
        cpu_possible_map = phys_cpu_present_map;
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
               cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* set the claim registers for static routing --- Boot CPU gets
         * all interrupts until all other CPUs started */
        outb(0xff, VIC_CLAIM_REGISTER_0);
        outb(0xff, VIC_CLAIM_REGISTER_1);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

        /* Finally tell the firmware that we're driving */
        outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
             VOYAGER_SUS_IN_CONTROL_PORT);

        current_thread_info()->cpu = boot_cpu_id;
        x86_write_percpu(cpu_number, boot_cpu_id);
}

/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;

        identify_secondary_cpu(c);
}

/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
        __u8 cpuid = hard_smp_processor_id();

        cpu_init();

        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);

        /* setup the 8259 master slave pair belonging to this CPU ---
         * we won't actually receive any until the boot CPU
         * relinquishes its static routing mask */
        vic_setup_pic();

        qic_setup();

        if (is_cpu_quad() && !is_cpu_vic_boot()) {
                /* clear the boot CPI */
                __u8 dummy;

                dummy =
                    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
                printk("read dummy %d\n", dummy);
        }

        /* lower the mask to receive CPIs */
        vic_enable_cpi();

        VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

        notify_cpu_starting(cpuid);

        /* enable interrupts */
        local_irq_enable();

        /* get our bogomips */
        calibrate_delay();

        /* save our processor parameters */
        smp_store_cpu_info(cpuid);

        /* if we're a quad, we may need to bootstrap other CPUs */
        do_quad_bootstrap();

        /* FIXME: this is rather a poor hack to prevent the CPU
         * activating softirqs while it's supposed to be waiting for
         * permission to proceed.  Without this, the new per CPU stuff
         * in the softirqs will fail */
        local_irq_disable();
        cpu_set(cpuid, cpu_callin_map);

        /* signal that we're done */
        cpu_booted_map = 1;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rep_nop();
        local_irq_enable();

        local_flush_tlb();

        cpu_set(cpuid, cpu_online_map);
        wmb();
        cpu_idle();
}

/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init do_boot_cpu(__u8 cpu)
{
        struct task_struct *idle;
        int timeout;
        unsigned long flags;
        int quad_boot = (1 << cpu) & voyager_quad_processors
            & ~(voyager_extended_vic_processors
                & voyager_allowed_boot_processors);

        /* This is the format of the CPI IDT gate (in real mode) which
         * we're hijacking to boot the CPU */
        union IDTFormat {
                struct seg {
                        __u16 Offset;
                        __u16 Segment;
                } idt;
                __u32 val;
        } hijack_source;

        __u32 *hijack_vector;
        __u32 start_phys_address = setup_trampoline();

        /* There's a clever trick to this: The linux trampoline is
         * compiled to begin at absolute location zero, so make the
         * address zero but have the data segment selector compensate
         * for the actual address */
        hijack_source.idt.Offset = start_phys_address & 0x000F;
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
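
        /* Worked example with an illustrative address (not taken from
         * the original source): if setup_trampoline() returned 0x9F000,
         * the gate would be built as Offset = 0x9F000 & 0x000F = 0x0000
         * and Segment = (0x9F000 >> 4) & 0xFFFF = 0x9F00; in real mode
         * 0x9F00:0x0000 resolves back to physical 0x9F000, while the
         * trampoline code keeps its link-time offset of zero. */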

        cpucount++;
        alternatives_smp_switch(1);

        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
        idle->thread.ip = (unsigned long)start_secondary;
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.sp = (void *)idle->thread.sp;

        init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        irq_ctx_init(cpu);

        /* Note: Don't modify initial ss override */
        VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
                (unsigned long)hijack_source.val, hijack_source.idt.Segment,
                hijack_source.idt.Offset, stack_start.sp));

        /* init lowmem identity mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        min_t(unsigned long, KERNEL_PGD_PTRS,
                              KERNEL_PGD_BOUNDARY));
        flush_tlb_all();

        if (quad_boot) {
                printk("CPU %d: non extended Quad boot\n", cpu);
                hijack_vector =
                    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
        } else {
                printk("CPU%d: extended VIC boot\n", cpu);
                hijack_vector =
                    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
                /* VIC errata, may also receive interrupt at this address */
                hijack_vector =
                    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
                                  VIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
        }
        /* All non-boot CPUs start with interrupts fully masked.  Need
         * to lower the mask of the CPI we're about to send.  We do
         * this in the VIC by masquerading as the processor we're
         * about to boot and lowering its interrupt mask */
        local_irq_save(flags);
        if (quad_boot) {
                send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
        } else {
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                /* here we're altering registers belonging to `cpu' */

                outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
                /* now go back to our original identity */
                outb(boot_cpu_id, VIC_PROCESSOR_ID);

                /* and boot the CPU */
                send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
        }
        cpu_booted_map = 0;
        local_irq_restore(flags);

        /* now wait for it to become ready (or timeout) */
        for (timeout = 0; timeout < 50000; timeout++) {
                if (cpu_booted_map)
                        break;
                udelay(100);
        }
        /* reset the page table */
        zap_low_mappings();

        if (cpu_booted_map) {
                VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
                        cpu, smp_processor_id()));

                printk("CPU%d: ", cpu);
                print_cpu_info(&cpu_data(cpu));
                wmb();
                cpu_set(cpu, cpu_callout_map);
                cpu_set(cpu, cpu_present_map);
        } else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
                if (*
                    ((volatile unsigned char *)phys_to_virt(start_phys_address))
                    == 0xA5)
                        printk("Stuck.\n");
                else
                        printk("Not responding.\n");

                cpucount--;
        }
}

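/* Timing sketch, assuming the udelay(100) per poll above: the
 * 50000-iteration wait gives a secondary roughly five seconds to set
 * cpu_booted_map before it is declared stuck or unresponsive. */
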
void __init smp_boot_cpus(void)
{
        int i;

        /* CAT BUS initialisation must be done after the memory */
        /* FIXME: The L4 has a catbus too, it just needs to be
         * accessed in a totally different way */
        if (voyager_level == 5) {
                voyager_cat_init();

                /* now that the cat has probed the Voyager System Bus, sanity
                 * check the cpu map */
                if (((voyager_quad_processors | voyager_extended_vic_processors)
                     & cpus_addr(phys_cpu_present_map)[0]) !=
                    cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** "
                               "Sanity check of CPU present map FAILED\n");
                }
        } else if (voyager_level == 4)
                voyager_extended_vic_processors =
                    cpus_addr(phys_cpu_present_map)[0];

        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
        /* Remove the global_irq_holder setting, it triggers a BUG() on
         * schedule at the moment */
        //global_irq_holder = boot_cpu_id;

        /* FIXME: Need to do something about this but currently only works
         * on CPUs with a tsc which none of mine have.
         smp_tune_scheduling();
         */
        smp_store_cpu_info(boot_cpu_id);
        /* setup the jump vector */
        initial_code = (unsigned long)initialize_secondary;
        printk("CPU%d: ", boot_cpu_id);
        print_cpu_info(&cpu_data(boot_cpu_id));

        if (is_cpu_quad()) {
                /* booting on a Quad CPU */
                printk("VOYAGER SMP: Boot CPU is Quad\n");
                qic_setup();
                do_quad_bootstrap();
        }

        /* enable our own CPIs */
        vic_enable_cpi();

        cpu_set(boot_cpu_id, cpu_online_map);
        cpu_set(boot_cpu_id, cpu_callout_map);

        /* loop over all the extended VIC CPUs and boot them.  The
         * Quad CPUs must be bootstrapped by their extended VIC cpu */
        for (i = 0; i < NR_CPUS; i++) {
                if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
                        continue;
                do_boot_cpu(i);
                /* This udelay seems to be needed for the Quad boots
                 * don't remove unless you know what you're doing */
                udelay(1000);
        }
        /* we could compute the total bogomips here, but why bother?,
         * Code added from smpboot.c */
        {
                unsigned long bogosum = 0;

                for_each_online_cpu(i)
                        bogosum += cpu_data(i).loops_per_jiffy;
                printk(KERN_INFO "Total of %d processors activated "
                       "(%lu.%02lu BogoMIPS).\n",
                       cpucount + 1, bogosum / (500000 / HZ),
                       (bogosum / (5000 / HZ)) % 100);
        }
        voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
        printk("VOYAGER: Extended (interrupt handling CPUs): "
               "%d, non-extended: %d\n", voyager_extended_cpus,
               num_booting_cpus() - voyager_extended_cpus);
        /* that's it, switch to symmetric mode */
        outb(0, VIC_PRIORITY_REGISTER);
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}

/* Reload the secondary CPUs task structure (this function does not
 * return) */
static void __init initialize_secondary(void)
{
#if 0
        // AC kernels only
        set_current(hard_get_current());
#endif

        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */

        asm volatile ("movl %0,%%esp\n\t"
                      "jmp *%1"::"r" (current->thread.sp),
                      "r"(current->thread.ip));
}

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
void smp_vic_sys_interrupt(struct pt_regs *regs)
{
        ack_CPI(VIC_SYS_INT);
        printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; these interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
void smp_vic_cmn_interrupt(struct pt_regs *regs)
{
        static __u8 in_cmn_int = 0;
        static DEFINE_SPINLOCK(cmn_int_lock);

        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
        if (in_cmn_int)
                goto unlock_end;

        in_cmn_int++;
        _raw_spin_unlock(&cmn_int_lock);

        VDEBUG(("Voyager COMMON INTERRUPT\n"));

        if (voyager_level == 5)
                voyager_cat_do_common_interrupt();

        _raw_spin_lock(&cmn_int_lock);
        in_cmn_int = 0;
      unlock_end:
        _raw_spin_unlock(&cmn_int_lock);
        ack_CPI(VIC_CMN_INT);
}

/*
 * Reschedule call back.  Nothing to do, all the work is done
 * automatically when we return from the interrupt. */
static void smp_reschedule_interrupt(void)
{
        /* do nothing */
}

static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void voyager_leave_mm(unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 * Invalidate call-back
 */
static void smp_invalidate_interrupt(void)
{
        __u8 cpu = smp_processor_id();

        if (!test_bit(cpu, &smp_invalidate_needed))
                return;
        /* This will flood messages.  Don't uncomment unless you see
         * Problems with cross cpu invalidation
         VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
         smp_processor_id()));
         */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        voyager_leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        clear_bit(cpu, &smp_invalidate_needed);
        smp_mb__after_clear_bit();
}

/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
                         unsigned long va)
{
        int stuck = 50000;

        if (!cpumask)
                BUG();
        if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
        if (!mm)
                BUG();

        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        atomic_set_mask(cpumask, &smp_invalidate_needed);
        /*
         * We have to send the CPI only to
         * CPUs affected.
         */
        send_CPI(cpumask, VIC_INVALIDATE_CPI);

        while (smp_invalidate_needed) {
                mb();
                if (--stuck == 0) {
                        printk("***WARNING*** Stuck doing invalidate CPI "
                               "(CPU%d)\n", smp_processor_id());
                        break;
                }
        }

        /* Uncomment only to debug invalidation problems
           VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
         */

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        voyager_leave_mm(smp_processor_id());
        }
        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        voyager_leave_mm(smp_processor_id());
        }

        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}

EXPORT_SYMBOL(flush_tlb_page);

/* enable the requested IRQs */
static void smp_enable_irq_interrupt(void)
{
        __u8 irq;
        __u8 cpu = get_cpu();

        VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
                vic_irq_enable_mask[cpu]));

        spin_lock(&vic_irq_lock);
        for (irq = 0; irq < 16; irq++) {
                if (vic_irq_enable_mask[cpu] & (1 << irq))
                        enable_local_vic_irq(irq);
        }
        vic_irq_enable_mask[cpu] = 0;
        spin_unlock(&vic_irq_lock);

        put_cpu_no_resched();
}

/*
 * CPU halt call-back
 */
static void smp_stop_cpu_function(void *dummy)
{
        VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for (;;)
                halt();
}

/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
}

static void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
}

/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't use this actually for counting so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
void smp_qic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        ack_QIC_CPI(QIC_TIMER_CPI);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

void smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
}

void smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
}

void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
}

void smp_qic_call_function_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
}

void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
        smp_call_function_single_interrupt();
}

void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        __u8 cpu = smp_processor_id();

        if (is_cpu_quad())
                ack_QIC_CPI(VIC_CPI_LEVEL0);
        else
                ack_VIC_CPI(VIC_CPI_LEVEL0);

        if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
                wrapper_smp_local_timer_interrupt();
        if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
                smp_reschedule_interrupt();
        if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
                smp_enable_irq_interrupt();
        if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
        if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI,
                               &vic_cpi_mailbox[cpu]))
                smp_call_function_single_interrupt();
        set_irq_regs(old_regs);
}

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                voyager_leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, 0, 1);
}

/* send a reschedule CPI to one CPU by physical CPU number */
static void voyager_smp_send_reschedule(int cpu)
{
        send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

int hard_smp_processor_id(void)
{
        __u8 i;
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);

        if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return cpumask & 0x1F;

        for (i = 0; i < 8; i++) {
                if (cpumask & (1 << i))
                        return i;
        }
        printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);

        return 0;
}

int safe_smp_processor_id(void)
{
        return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
static void voyager_smp_send_stop(void)
{
        smp_call_function(smp_stop_cpu_function, NULL, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void smp_vic_timer_interrupt(void)
{
        send_CPI_allbutself(VIC_TIMER_CPI);
        smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(void)
{
        int cpu = smp_processor_id();
        long weight;

        profile_tick(CPU_PROFILING);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile.  In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
                if (per_cpu(prof_counter, cpu) !=
                    per_cpu(prof_old_multiplier, cpu)) {
                        /* FIXME: need to update the vic timer tick here */
                        per_cpu(prof_old_multiplier, cpu) =
                            per_cpu(prof_counter, cpu);
                }

                update_process_times(user_mode_vm(get_irq_regs()));
        }

        if (((1 << cpu) & voyager_extended_vic_processors) == 0)
                /* only extended VIC processors participate in
                 * interrupt distribution */
                return;

        /*
         * We take the 'long' return path, and there every subsystem
         * grabs the appropriate locks (kernel lock/ irq lock).
         *
         * we might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
         *
         * Currently this isn't too much of an issue (performance wise),
         * we can take more than 100K local irqs per second on a 100 MHz P5.
         */

        if ((++vic_tick[cpu] & 0x7) != 0)
                return;
        /* get here every 16 ticks (about every 1/6 of a second) */

        /* Change our priority to give someone else a chance at getting
         * the IRQ.  The algorithm goes like this:
         *
         * In the VIC, the dynamically routed interrupt is always
         * handled by the lowest priority eligible (i.e. receiving
         * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
         * lowest processor number gets it.
         *
         * The priority of a CPU is controlled by a special per-CPU
         * VIC priority register which is 3 bits wide 0 being lowest
         * and 7 highest priority..
         *
         * Therefore we subtract the average number of interrupts from
         * the number we've fielded.  If this number is negative, we
         * lower the activity count and if it is positive, we raise
         * it.
         *
         * I'm afraid this still leads to odd looking interrupt counts:
         * the totals are all roughly equal, but the individual ones
         * look rather skewed.
         *
         * FIXME: This algorithm is total crap when mixed with SMP
         * affinity code since we now try to even up the interrupt
         * counts when an affinity binding is keeping them on a
         * particular CPU */
        weight = (vic_intr_count[cpu] * voyager_extended_cpus
                  - vic_intr_total) >> 4;
        weight += 4;
        if (weight > 7)
                weight = 7;
        if (weight < 0)
                weight = 0;

        outb((__u8) weight, VIC_PRIORITY_REGISTER);

#ifdef VOYAGER_DEBUG
        if ((vic_tick[cpu] & 0xFFF) == 0) {
                /* print this message roughly every 25 secs */
                printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
                       cpu, vic_tick[cpu], weight);
        }
#endif
}

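/* Worked example with illustrative numbers (not from the original
 * source): with voyager_extended_cpus == 4, a CPU that has fielded
 * 1000 of 4800 total interrupts gets weight = (1000 * 4 - 4800) >> 4
 * = -50; a negative weight drives the 3-bit priority register towards
 * 0 (lowest), and since the VIC routes dynamic interrupts to the
 * lowest-priority eligible CPU, that under-used CPU then attracts
 * more of them. */
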
/* setup the profiling timer */
int setup_profiling_timer(unsigned int multiplier)
{
        int i;

        if (!multiplier)
                return -EINVAL;

        /*
         * Set the new multiplier for each CPU.  CPUs don't start using the
         * new values until the next timer interrupt in which they do process
         * accounting.
         */
        for (i = 0; i < NR_CPUS; ++i)
                per_cpu(prof_multiplier, i) = multiplier;

        return 0;
}

/* This is a bit of a mess, but forced on us by the genirq changes:
 * there's no genirq handler that really does what voyager wants, so
 * hack it up with the simple IRQ handler */
static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
        before_handle_vic_irq(irq);
        handle_simple_irq(irq, desc);
        after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_init */
#define VIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))

void __init voyager_smp_intr_init(void)
{
        int i;

        /* initialize the per cpu irq mask to all disabled */
        for (i = 0; i < NR_CPUS; i++)
                vic_irq_mask[i] = 0xFFFF;

        VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

        VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
        VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

        QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
        QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
        QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
        QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
        QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

        /* now put the VIC descriptor into the first 48 IRQs
         *
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for (i = 0; i < 48; i++)
                set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}

/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void send_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;
        __u32 quad_cpuset = (cpuset & voyager_quad_processors);

        if (cpi < VIC_START_FAKE_CPI) {
                /* fake CPI are only used for booting, so send to the
                 * extended quads as well---Quads must be VIC booted */
                outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
                return;
        }
        if (quad_cpuset)
                send_QIC_CPI(quad_cpuset, cpi);
        cpuset &= ~quad_cpuset;
        cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
        if (cpuset == 0)
                return;
        for_each_online_cpu(cpu) {
                if (cpuset & (1 << cpu))
                        set_bit(cpi, &vic_cpi_mailbox[cpu]);
        }
        if (cpuset)
                outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}

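/* Illustrative trace, not from the original source: a faked CPI such
 * as VIC_RESCHEDULE_CPI aimed at VIC CPU 3 is latched with
 * set_bit(cpi, &vic_cpi_mailbox[3]) and then delivered as the single
 * real hardware level VIC_CPI_LEVEL0; smp_vic_cpi_interrupt() on
 * CPU 3 then test_and_clear_bit()s the mailbox to demultiplex it. */
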
/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away */
static int ack_QIC_CPI(__u8 cpi)
{
        __u8 cpu = hard_smp_processor_id();

        cpi &= 7;

        outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
        return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void ack_special_QIC_CPI(__u8 cpi)
{
        switch (cpi) {
        case VIC_CMN_INT:
                outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
                break;
        case VIC_SYS_INT:
                outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
                break;
        }
        /* also clear at the VIC, just in case (nop for non-extended proc) */
        ack_VIC_CPI(cpi);
}

/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
        unsigned long flags;
        __u16 isr;
        __u8 cpu = smp_processor_id();

        local_irq_save(flags);
        isr = vic_read_isr();
        if ((isr & (1 << (cpi & 7))) == 0) {
                printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
        }
#endif
        /* send specific EOI; the two system interrupts have
         * bit 4 set for a separate vector but behave as the
         * corresponding 3 bit intr */
        outb_p(0x60 | (cpi & 7), 0x20);

#ifdef VOYAGER_DEBUG
        if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
                printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
        }
        local_irq_restore(flags);
#endif
}

/* cribbed with thanks from irq.c */
#define __byte(x,y) (((unsigned char *)&(y))[x])
#define cached_21(cpu) (__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu]))

static unsigned int startup_vic_irq(unsigned int irq)
{
        unmask_vic_irq(irq);

        return 0;
}

/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly. */

static void unmask_vic_irq(unsigned int irq)
{
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
        int cpu = smp_processor_id(), real_cpu;
        __u16 mask = (1 << irq);
        __u32 processorList = 0;
        unsigned long flags;

        VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
                if (!(voyager_extended_vic_processors & (1 << real_cpu)))
                        continue;
                if (!(cpu_irq_affinity[real_cpu] & mask)) {
                        /* irq has no affinity for this CPU, ignore */
                        continue;
                }
                if (real_cpu == cpu) {
                        enable_local_vic_irq(irq);
                } else if (vic_irq_mask[real_cpu] & mask) {
                        vic_irq_enable_mask[real_cpu] |= mask;
                        processorList |= (1 << real_cpu);
                }
        }
        spin_unlock_irqrestore(&vic_irq_lock, flags);
        if (processorList)
                send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}

static void mask_vic_irq(unsigned int irq)
{
        /* lazy disable, do nothing */
}

static void enable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = ~(1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        vic_irq_mask[cpu] &= mask;
        if (vic_irq_mask[cpu] == old_mask)
                return;

        VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu), 0xA1);
                (void)inb_p(0xA1);
        } else {
                outb_p(cached_21(cpu), 0x21);
                (void)inb_p(0x21);
        }
}

static void disable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = (1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        if (irq == 7)
                return;

        vic_irq_mask[cpu] |= mask;
        if (old_mask == vic_irq_mask[cpu])
                return;

        VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu), 0xA1);
                (void)inb_p(0xA1);
        } else {
                outb_p(cached_21(cpu), 0x21);
                (void)inb_p(0x21);
        }
}

/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void before_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_to_desc(irq);
        __u8 cpu = smp_processor_id();

        _raw_spin_lock(&vic_irq_lock);
        vic_intr_total++;
        vic_intr_count[cpu]++;

        if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
                /* The irq is not in our affinity mask, push it off
                 * onto another CPU */
                VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
                        "on cpu %d\n", irq, cpu));
                disable_local_vic_irq(irq);
                /* set IRQ_INPROGRESS to prevent the handler in irq.c from
                 * actually calling the interrupt routine */
                desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
        } else if (desc->status & IRQ_DISABLED) {
                /* Damn, the interrupt actually arrived, do the lazy
                 * disable thing.  The interrupt routine in irq.c will
                 * not handle a IRQ_DISABLED interrupt, so nothing more
                 * need be done here */
                VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
                        irq, cpu));
                disable_local_vic_irq(irq);
                desc->status |= IRQ_REPLAY;
        } else {
                desc->status &= ~IRQ_REPLAY;
        }

        _raw_spin_unlock(&vic_irq_lock);
}

/* Finish the VIC interrupt: basically mask */
static void after_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_to_desc(irq);

        _raw_spin_lock(&vic_irq_lock);
        {
                unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
                __u16 isr;
#endif

                desc->status = status;
                if ((status & IRQ_DISABLED))
                        disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
                /* DEBUG: before we ack, check what's in progress */
                isr = vic_read_isr();
                if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
                        __u8 cpu = smp_processor_id();
                        __u8 real_cpu;
                        int mask;	/* Um... initialize me??? --RR */

                        printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
                               cpu, irq);
                        for_each_possible_cpu(real_cpu, mask) {
                                outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
                                     VIC_PROCESSOR_ID);
                                isr = vic_read_isr();
                                if (isr & (1 << irq)) {
                                        printk
                                            ("VOYAGER SMP: CPU%d ack irq %d\n",
                                             real_cpu, irq);
                                        ack_vic_irq(irq);
                                }
                                outb(cpu, VIC_PROCESSOR_ID);
                        }
                }
#endif /* VOYAGER_DEBUG */
                /* as soon as we ack, the interrupt is eligible for
                 * receipt by another CPU so everything must be in
                 * order here */
                ack_vic_irq(irq);
                if (status & IRQ_REPLAY) {
                        /* replay is set if we disable the interrupt
                         * in the before_handle_vic_irq() routine, so
                         * clear the in progress bit here to allow the
                         * next CPU to handle this correctly */
                        desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
                }
#ifdef VOYAGER_DEBUG
                isr = vic_read_isr();
                if ((isr & (1 << irq)) != 0)
                        printk("VOYAGER SMP: after_handle_vic_irq() after "
                               "ack irq=%d, isr=0x%x\n", irq, isr);
#endif /* VOYAGER_DEBUG */
        }
        _raw_spin_unlock(&vic_irq_lock);

        /* All code after this point is out of the main path - the IRQ
         * may be intercepted by another CPU if reasserted */
}

/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */

void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
        /* Only extended processors handle interrupts */
        unsigned long real_mask;
        unsigned long irq_mask = 1 << irq;
        int cpu;

        real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

        if (cpus_addr(mask)[0] == 0)
                /* can't have no CPUs to accept the interrupt -- extremely
                 * bad things will happen */
                return;

        if (irq == 0)
                /* can't change the affinity of the timer IRQ.  This
                 * is due to the constraint in the voyager
                 * architecture that the CPI also comes in on an IRQ
                 * line and we have chosen IRQ0 for this.  If you
                 * raise the mask on this interrupt, the processor
                 * will no-longer be able to accept VIC CPIs */
                return;

        if (irq >= 32)
                /* You can only have 32 interrupts in a voyager system
                 * (and 32 only if you have a secondary microchannel
                 * bus) */
                return;

        for_each_online_cpu(cpu) {
                unsigned long cpu_mask = 1 << cpu;

                if (cpu_mask & real_mask) {
                        /* enable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] |= irq_mask;
                } else {
                        /* disable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] &= ~irq_mask;
                }
        }
        /* this is magic, we now have the correct affinity maps, so
         * enable the interrupt.  This will send an enable CPI to
         * those CPUs who need to enable it in their local masks,
         * causing them to correct for the new affinity.  If the
         * interrupt is currently globally disabled, it will simply be
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
        unmask_vic_irq(irq);
}

static void ack_vic_irq(unsigned int irq)
{
        if (irq & 8) {
                outb(0x62, 0x20);	/* Specific EOI to cascade */
                outb(0x60 | (irq & 7), 0xA0);
        } else {
                outb(0x60 | (irq & 7), 0x20);
        }
}

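/* Background on the constants above (standard 8259A OCW2 behaviour):
 * 0x60 | level is a specific EOI.  IRQs 8-15 sit on the slave PIC
 * behind master line 2, so the slave gets the specific EOI for
 * (irq & 7) while the master needs 0x62, the specific EOI for the
 * cascade line. */
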
/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static __init void vic_enable_cpi(void)
{
        __u8 cpu = smp_processor_id();

        /* just take a copy of the current mask (nop for boot cpu) */
        vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

        enable_local_vic_irq(VIC_CPI_LEVEL0);
        enable_local_vic_irq(VIC_CPI_LEVEL1);
        /* for sys int and cmn int */
        enable_local_vic_irq(7);

        if (is_cpu_quad()) {
                outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
                outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
                VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
                        cpu, QIC_CPI_ENABLE));
        }

        VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
                cpu, vic_irq_mask[cpu]));
}

void voyager_smp_dump()
{
        int old_cpu = smp_processor_id(), cpu;

        /* dump the interrupt masks of each processor */
        for_each_online_cpu(cpu) {
                __u16 imr, isr, irr;
                unsigned long flags;

                local_irq_save(flags);
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                imr = (inb(0xa1) << 8) | inb(0x21);
                outb(0x0a, 0xa0);	/* OCW3: next read returns IRR */
                irr = inb(0xa0) << 8;
                outb(0x0a, 0x20);
                irr |= inb(0x20);
                outb(0x0b, 0xa0);	/* OCW3: next read returns ISR */
                isr = inb(0xa0) << 8;
                outb(0x0b, 0x20);
                isr |= inb(0x20);
                outb(old_cpu, VIC_PROCESSOR_ID);
                local_irq_restore(flags);
                printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
                       cpu, vic_irq_mask[cpu], imr, irr, isr);

                /* These lines are put in to try to unstick an un ack'd irq */
                if (isr) {
                        int irq;

                        for (irq = 0; irq < 16; irq++) {
                                if (isr & (1 << irq)) {
                                        printk("\tCPU%d: ack irq %d\n",
                                               cpu, irq);
                                        local_irq_save(flags);
                                        outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
                                             VIC_PROCESSOR_ID);
                                        ack_vic_irq(irq);
                                        outb(old_cpu, VIC_PROCESSOR_ID);
                                        local_irq_restore(flags);
                                }
                        }
                }
        }
}

void smp_voyager_power_off(void *dummy)
{
        if (smp_processor_id() == boot_cpu_id)
                voyager_power_off();
        else
                smp_stop_cpu_function(NULL);
}

static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
{
        /* FIXME: ignore max_cpus for now */
        smp_boot_cpus();
}

static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
        init_gdt(smp_processor_id());
        switch_to_new_gdt();

        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
        cpu_set(smp_processor_id(), cpu_possible_map);
        cpu_set(smp_processor_id(), cpu_present_map);
}

static int __cpuinit voyager_cpu_up(unsigned int cpu)
{
        /* This only works at boot for x86.  See "rewrite" above. */
        if (cpu_isset(cpu, smp_commenced_mask))
                return -ENOSYS;

        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map))
                return -EIO;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_online(cpu))
                mb();
        return 0;
}

static void __init voyager_smp_cpus_done(unsigned int max_cpus)
{
        zap_low_mappings();
}

void __init smp_setup_processor_id(void)
{
        current_thread_info()->cpu = hard_smp_processor_id();
        x86_write_percpu(cpu_number, hard_smp_processor_id());
}

struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
        .smp_prepare_cpus = voyager_smp_prepare_cpus,
        .cpu_up = voyager_cpu_up,
        .smp_cpus_done = voyager_smp_cpus_done,

        .smp_send_stop = voyager_smp_send_stop,
        .smp_send_reschedule = voyager_smp_send_reschedule,

        .send_call_func_ipi = native_send_call_func_ipi,
        .send_call_func_single_ipi = native_send_call_func_single_ipi,
};