/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * linux/arch/i386/kernel/voyager_smp.c
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/voyager.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;
/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;
/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;
/* Have we found an SMP box - used by time.c to do the profiling
   interrupt for timeslicing; do not set to 1 until the per CPU timer
   interrupt is active */
int smp_found_config = 0;
/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;
/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void enable_vic_irq(unsigned int irq);
static void disable_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
/* Inline functions */
static inline void
send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
        voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
                (smp_processor_id() << 16) + cpi;
}
static inline void
send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if(cpuset & (1<<cpu)) {
#ifdef VOYAGER_DEBUG
                        if(!cpu_isset(cpu, cpu_online_map))
                                VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
#endif
                        send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
                }
        }
}
static inline void
wrapper_smp_local_timer_interrupt(struct pt_regs *regs)
{
        irq_enter();
        smp_local_timer_interrupt(regs);
        irq_exit();
}
static inline void
send_one_CPI(__u8 cpu, __u8 cpi)
{
        if(voyager_quad_processors & (1<<cpu))
                send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
        else
                send_CPI(1<<cpu, cpi);
}
static inline void
send_CPI_allbutself(__u8 cpi)
{
        __u8 cpu = smp_processor_id();
        __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
}
static inline int
is_cpu_quad(void)
{
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}
static inline int
is_cpu_extended(void)
{
        __u8 cpu = hard_smp_processor_id();

        return(voyager_extended_vic_processors & (1<<cpu));
}
static inline int
is_cpu_vic_boot(void)
{
        __u8 cpu = hard_smp_processor_id();

        return(voyager_extended_vic_processors
               & voyager_allowed_boot_processors & (1<<cpu));
}
static void
ack_CPI(__u8 cpi)
{
        switch(cpi) {
        case VIC_CPU_BOOT_CPI:
                if(is_cpu_quad() && !is_cpu_vic_boot())
                        ack_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        case VIC_SYS_INT:
        case VIC_CMN_INT:
                /* These are slightly strange.  Even on the Quad card,
                 * They are vectored as VIC CPIs */
                if(is_cpu_quad())
                        ack_special_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        default:
                printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
                break;
        }
}
/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct hw_interrupt_type vic_irq_type = {
        .typename = "VIC-level",
        .startup = startup_vic_irq,
        .shutdown = disable_vic_irq,
        .enable = enable_vic_irq,
        .disable = disable_vic_irq,
        .ack = before_handle_vic_irq,
        .end = after_handle_vic_irq,
        .set_affinity = set_vic_irq_affinity,
};
/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;
/* steal a page from the bottom of memory for the trampoline and
 * squirrel its address away here.  This will be in kernel virtual
 * space */
static __u32 trampoline_base;
/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;
/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;
/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */
/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
/* debugging routine to read the isr of the cpu's pic */
static __u16
vic_read_isr(void)
{
        __u16 isr;

        outb(0x0b, 0xa0);
        isr = inb(0xa0) << 8;
        outb(0x0b, 0x20);
        isr |= inb(0x20);

        return isr;
}
static void
qic_setup(void)
{
        if(!is_cpu_quad()) {
                /* not a quad, no setup */
                return;
        }
        outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
        outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

        if(is_cpu_extended()) {
                /* the QIC duplicate of the VIC base register */
                outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
                outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

                /* FIXME: should set up the QIC timer and memory parity
                 * error vectors here */
        }
}
static void
vic_setup_pic(void)
{
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* clear the claim registers for dynamic routing */
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        outb(0, VIC_PRIORITY_REGISTER);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
        /* Now initialise the master PIC belonging to this CPU by
         * sending the four ICWs */

        /* ICW1: level triggered, ICW4 needed */
        outb(0x19, 0x20);

        /* ICW2: vector base */
        outb(FIRST_EXTERNAL_VECTOR, 0x21);

        /* ICW3: slave at line 2 */
        outb(0x04, 0x21);

        /* ICW4: 8086 mode */
        outb(0x01, 0x21);

        /* now the same for the slave PIC */

        /* ICW1: level trigger, ICW4 needed */
        outb(0x19, 0xA0);

        /* ICW2: slave vector base */
        outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

        /* ICW3: slave ID */
        outb(0x02, 0xA1);

        /* ICW4: 8086 mode */
        outb(0x01, 0xA1);
}
static void
do_quad_bootstrap(void)
{
        if(is_cpu_quad() && is_cpu_vic_boot()) {
                int i;
                unsigned long flags;
                __u8 cpuid = hard_smp_processor_id();

                local_irq_save(flags);

                for(i = 0; i < 4; i++) {
                        /* FIXME: this would be >>3 &0x7 on the 32 way */
                        if(((cpuid >> 2) & 0x03) == i)
                                /* don't lower our own mask! */
                                continue;

                        /* masquerade as local Quad CPU */
                        outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
                        /* enable the startup CPI */
                        outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
                        /* restore cpu id */
                        outb(0, QIC_PROCESSOR_ID);
                }
                local_irq_restore(flags);
        }
}
/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init
find_smp_config(void)
{
        int i;

        boot_cpu_id = hard_smp_processor_id();

        printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

        /* initialize the CPU structures (moved from smp_boot_cpus) */
        for(i = 0; i < NR_CPUS; i++) {
                cpu_irq_affinity[i] = ~0;
        }
        cpu_online_map = cpumask_of_cpu(boot_cpu_id);

        /* The boot CPU must be extended */
        voyager_extended_vic_processors = 1<<boot_cpu_id;
        /* initially, all of the first 8 cpu's can boot */
        voyager_allowed_boot_processors = 0xff;
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
        cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
        cpu_possible_map = phys_cpu_present_map;
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* set the claim registers for static routing --- Boot CPU gets
         * all interrupts until all other CPUs started */
        outb(0xff, VIC_CLAIM_REGISTER_0);
        outb(0xff, VIC_CLAIM_REGISTER_1);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

        /* Finally tell the firmware that we're driving */
        outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
             VOYAGER_SUS_IN_CONTROL_PORT);

        current_thread_info()->cpu = boot_cpu_id;
}
/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init
smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data[id];

        *c = boot_cpu_data;

        identify_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
static __u32 __init
setup_trampoline(void)
{
        /* these two are global symbols in trampoline.S */
        extern __u8 trampoline_end[];
        extern __u8 trampoline_data[];

        memcpy((__u8 *)trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys((__u8 *)trampoline_base);
}
/* Routine initially called when a non-boot CPU is brought online */
static void __init
start_secondary(void *unused)
{
        __u8 cpuid = hard_smp_processor_id();
        /* external functions not defined in the headers */
        extern void calibrate_delay(void);

        cpu_init();

        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);

        /* setup the 8259 master slave pair belonging to this CPU ---
         * we won't actually receive any until the boot CPU
         * relinquishes its static routing mask */
        vic_setup_pic();

        qic_setup();

        if(is_cpu_quad() && !is_cpu_vic_boot()) {
                /* clear the boot CPI */
                __u8 dummy;

                dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
                printk("read dummy %d\n", dummy);
        }

        /* lower the mask to receive CPIs */
        vic_enable_cpi();

        VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

        /* enable interrupts */
        local_irq_enable();

        /* get our bogomips */
        calibrate_delay();

        /* save our processor parameters */
        smp_store_cpu_info(cpuid);

        /* if we're a quad, we may need to bootstrap other CPUs */
        do_quad_bootstrap();

        /* FIXME: this is rather a poor hack to prevent the CPU
         * activating softirqs while it's supposed to be waiting for
         * permission to proceed.  Without this, the new per CPU stuff
         * in the softirqs will fail */
        local_irq_disable();
        cpu_set(cpuid, cpu_callin_map);

        /* signal that we're done */
        cpu_booted_map = 1;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rep_nop();
        local_irq_enable();

        local_flush_tlb();

        cpu_set(cpuid, cpu_online_map);
        wmb();
        cpu_idle();
}
/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init
do_boot_cpu(__u8 cpu)
{
        struct task_struct *idle;
        int timeout;
        unsigned long flags;
        int quad_boot = (1<<cpu) & voyager_quad_processors
                & ~( voyager_extended_vic_processors
                     & voyager_allowed_boot_processors);

        /* For the 486, we can't use the 4Mb page table trick, so
         * must map a region of memory */
#ifdef CONFIG_M486
        int i;
        unsigned long *page_table_copies = (unsigned long *)
                __get_free_page(GFP_KERNEL);
#endif
        pgd_t orig_swapper_pg_dir0;

        /* This is an area in head.S which was used to set up the
         * initial kernel stack.  We need to alter this to give the
         * booting CPU a new stack (taken from its idle process) */
        extern struct {
                __u8 *esp;
                unsigned short ss;
        } stack_start;
        /* This is the format of the CPI IDT gate (in real mode) which
         * we're hijacking to boot the CPU */
        union IDTFormat {
                struct seg {
                        __u16 Offset;
                        __u16 Segment;
                } idt;
                __u32 val;
        } hijack_source;

        __u32 *hijack_vector;
        __u32 start_phys_address = setup_trampoline();

        /* There's a clever trick to this: The linux trampoline is
         * compiled to begin at absolute location zero, so make the
         * address zero but have the data segment selector compensate
         * for the actual address */
        hijack_source.idt.Offset = start_phys_address & 0x000F;
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
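        /* Worked example (illustrative, assuming a hypothetical value):
         * if setup_trampoline() returned physical address 0x9F000, the
         * gate becomes Segment = 0x9F00, Offset = 0x0; real mode then
         * resolves Segment*16 + Offset = 0x9F000, so execution lands on
         * the trampoline even though it was linked at absolute zero. */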
        cpucount++;
        idle = fork_idle(cpu);
        if(IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
        idle->thread.eip = (unsigned long) start_secondary;
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *) idle->thread.esp;

        irq_ctx_init(cpu);

        /* Note: Don't modify initial ss override */
        VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
                (unsigned long)hijack_source.val, hijack_source.idt.Segment,
                hijack_source.idt.Offset, stack_start.esp));
        /* set the original swapper_pg_dir[0] to map 0 to 4Mb transparently
         * (so that the booting CPU can find start_32) */
        orig_swapper_pg_dir0 = swapper_pg_dir[0];
#ifdef CONFIG_M486
        if(page_table_copies == NULL)
                panic("No free memory for 486 page tables\n");
        for(i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++)
                page_table_copies[i] = (i * PAGE_SIZE)
                        | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;

        ((unsigned long *)swapper_pg_dir)[0] =
                ((virt_to_phys(page_table_copies)) & PAGE_MASK)
                | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
#else
        ((unsigned long *)swapper_pg_dir)[0] =
                (virt_to_phys(pg0) & PAGE_MASK)
                | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
#endif

        if(quad_boot) {
                printk("CPU %d: non extended Quad boot\n", cpu);
                hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4);
                *hijack_vector = hijack_source.val;
        } else {
                printk("CPU%d: extended VIC boot\n", cpu);
                hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4);
                *hijack_vector = hijack_source.val;
                /* VIC errata, may also receive interrupt at this address */
                hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4);
                *hijack_vector = hijack_source.val;
        }
        /* All non-boot CPUs start with interrupts fully masked.  Need
         * to lower the mask of the CPI we're about to send.  We do
         * this in the VIC by masquerading as the processor we're
         * about to boot and lowering its interrupt mask */
        local_irq_save(flags);
        if(quad_boot) {
                send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
        } else {
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                /* here we're altering registers belonging to `cpu' */

                outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
                /* now go back to our original identity */
                outb(boot_cpu_id, VIC_PROCESSOR_ID);

                /* and boot the CPU */

                send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
        }
        cpu_booted_map = 0;
        local_irq_restore(flags);

        /* now wait for it to become ready (or timeout) */
        for(timeout = 0; timeout < 50000; timeout++) {
                if(cpu_booted_map)
                        break;
                udelay(100);
        }
        /* reset the page table */
        swapper_pg_dir[0] = orig_swapper_pg_dir0;
        local_flush_tlb();
#ifdef CONFIG_M486
        free_page((unsigned long)page_table_copies);
#endif

        if (cpu_booted_map) {
                VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
                        cpu, smp_processor_id()));

                printk("CPU%d: ", cpu);
                print_cpu_info(&cpu_data[cpu]);
                wmb();
                cpu_set(cpu, cpu_callout_map);
        }
        else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
                if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5)
                        printk("Stuck.\n");
                else
                        printk("Not responding.\n");

                cpucount--;
        }
}
void __init
smp_boot_cpus(void)
{
        int i;

        /* CAT BUS initialisation must be done after the memory */
        /* FIXME: The L4 has a catbus too, it just needs to be
         * accessed in a totally different way */
        if(voyager_level == 5) {
                voyager_cat_init();

                /* now that the cat has probed the Voyager System Bus, sanity
                 * check the cpu map */
                if( ((voyager_quad_processors | voyager_extended_vic_processors)
                     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
                }
        } else if(voyager_level == 4)
                voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];

        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
        /* Remove the global_irq_holder setting, it triggers a BUG() on
         * schedule at the moment */
        //global_irq_holder = boot_cpu_id;

        /* FIXME: Need to do something about this but currently only works
         * on CPUs with a tsc which none of mine have.
        smp_tune_scheduling();
         */
        smp_store_cpu_info(boot_cpu_id);
        printk("CPU%d: ", boot_cpu_id);
        print_cpu_info(&cpu_data[boot_cpu_id]);

        if(is_cpu_quad()) {
                /* booting on a Quad CPU */
                printk("VOYAGER SMP: Boot CPU is Quad\n");
                qic_setup();
                do_quad_bootstrap();
        }

        /* enable our own CPIs */
        vic_enable_cpi();

        cpu_set(boot_cpu_id, cpu_online_map);
        cpu_set(boot_cpu_id, cpu_callout_map);

        /* loop over all the extended VIC CPUs and boot them.  The
         * Quad CPUs must be bootstrapped by their extended VIC cpu */
        for(i = 0; i < NR_CPUS; i++) {
                if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
                        continue;
                do_boot_cpu(i);
                /* This udelay seems to be needed for the Quad boots
                 * don't remove unless you know what you're doing */
                udelay(1000);
        }
        /* we could compute the total bogomips here, but why bother?,
         * Code added from smpboot.c */
        {
                unsigned long bogosum = 0;
                for (i = 0; i < NR_CPUS; i++)
                        if (cpu_isset(i, cpu_online_map))
                                bogosum += cpu_data[i].loops_per_jiffy;
                printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                        cpucount+1,
                        bogosum/(500000/HZ),
                        (bogosum/(5000/HZ))%100);
        }
        voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
        printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
        /* that's it, switch to symmetric mode */
        outb(0, VIC_PRIORITY_REGISTER);
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}
/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init
initialize_secondary(void)
{
#if 0
        // AC kernels only
        set_current(hard_get_current());
#endif

        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */

        asm volatile(
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
                :"r" (current->thread.esp),"r" (current->thread.eip));
}
/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
void
smp_vic_sys_interrupt(struct pt_regs *regs)
{
        ack_CPI(VIC_SYS_INT);
        printk("Voyager SYSTEM INTERRUPT\n");
}
/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
void
smp_vic_cmn_interrupt(struct pt_regs *regs)
{
        static __u8 in_cmn_int = 0;
        static DEFINE_SPINLOCK(cmn_int_lock);

        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
        if(in_cmn_int)
                goto unlock_end;

        in_cmn_int++;
        _raw_spin_unlock(&cmn_int_lock);

        VDEBUG(("Voyager COMMON INTERRUPT\n"));

        if(voyager_level == 5)
                voyager_cat_do_common_interrupt();

        _raw_spin_lock(&cmn_int_lock);
        in_cmn_int = 0;
 unlock_end:
        _raw_spin_unlock(&cmn_int_lock);
        ack_CPI(VIC_CMN_INT);
}
/*
 * Reschedule call back.  Nothing to do, all the work is done
 * automatically when we return from the interrupt. */
static void
smp_reschedule_interrupt(void)
{
        /* do nothing */
}
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL	0xffffffff
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void
leave_mm (unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
/*
 * Invalidate call-back
 */
static void
smp_invalidate_interrupt(void)
{
        __u8 cpu = smp_processor_id();

        if (!test_bit(cpu, &smp_invalidate_needed))
                return;
        /* This will flood messages.  Don't uncomment unless you see
         * Problems with cross cpu invalidation
        VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
                smp_processor_id()));
        */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        clear_bit(cpu, &smp_invalidate_needed);
        smp_mb__after_clear_bit();
}
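/* Note on the barrier pairing above (a reading of the code, not an
 * original comment): the initiator in flush_tlb_others() spins on
 * smp_invalidate_needed, so the flush performed here must be visible
 * before our bit drops out of the map; smp_mb__before_clear_bit() and
 * smp_mb__after_clear_bit() enforce that ordering around clear_bit(). */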
/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
                  unsigned long va)
{
        int stuck = 50000;

        if (!cpumask)
                BUG();
        if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
        if (!mm)
                BUG();

        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        atomic_set_mask(cpumask, &smp_invalidate_needed);
        /*
         * We have to send the CPI only to
         * CPUs affected.
         */
        send_CPI(cpumask, VIC_INVALIDATE_CPI);

        while (smp_invalidate_needed) {
                mb();
                if(--stuck == 0) {
                        printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id());
                        break;
                }
        }

        /* Uncomment only to debug invalidation problems
        VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
        */

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
void
flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}
void
flush_tlb_mm (struct mm_struct * mm)
{
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if(current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
/* enable the requested IRQs */
static void
smp_enable_irq_interrupt(void)
{
        __u8 irq;
        __u8 cpu = get_cpu();

        VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
                vic_irq_enable_mask[cpu]));

        spin_lock(&vic_irq_lock);
        for(irq = 0; irq < 16; irq++) {
                if(vic_irq_enable_mask[cpu] & (1<<irq))
                        enable_local_vic_irq(irq);
        }
        vic_irq_enable_mask[cpu] = 0;
        spin_unlock(&vic_irq_lock);

        put_cpu_no_resched();
}
/*
 * CPU halt call-back
 */
static void
smp_stop_cpu_function(void *dummy)
{
        VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for(;;)
                halt();
}
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        volatile unsigned long started;
        volatile unsigned long finished;
        int wait;
};

static struct call_data_struct * call_data;
/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPU's - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void
smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        /* must take copy of wait because call_data may be replaced
         * unless the function is waiting for us to finish */
        int wait = call_data->wait;
        __u8 cpu = smp_processor_id();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        if(!test_and_clear_bit(cpu, &call_data->started)) {
                /* If the bit wasn't set, this could be a replay */
                printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu);
                return;
        }
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        irq_exit();
        if (wait) {
                mb();
                clear_bit(cpu, &call_data->finished);
        }
}
/* Call this function on all CPUs using the function_interrupt above
    <func> The function to run. This must be fast and non-blocking.
    <info> An arbitrary pointer to pass to the function.
    <retry> If true, keep retrying until ready.
    <wait> If true, wait until function has completed on other CPUs.
    [RETURNS] 0 on success, else a negative status code. Does not return
    until remote CPUs are nearly ready to execute <<func>>, are already
    executing it, or have executed it.
*/
int
smp_call_function (void (*func) (void *info), void *info, int retry,
                   int wait)
{
        struct call_data_struct data;
        __u32 mask = cpus_addr(cpu_online_map)[0];

        mask &= ~(1<<smp_processor_id());

        if (!mask)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.started = mask;
        data.wait = wait;
        if (wait)
                data.finished = mask;

        spin_lock(&call_lock);
        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        send_CPI_allbutself(VIC_CALL_FUNCTION_CPI);

        /* Wait for response */
        while (data.started)
                barrier();

        if (wait)
                while (data.finished)
                        barrier();

        spin_unlock(&call_lock);

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't use this actually for counting so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPU's which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
void
smp_apic_timer_interrupt(struct pt_regs *regs)
{
        wrapper_smp_local_timer_interrupt(regs);
}
/* All of the QUAD interrupt GATES */
void
smp_qic_timer_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_TIMER_CPI);
        wrapper_smp_local_timer_interrupt(regs);
}

void
smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
}

void
smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
}

void
smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
}

void
smp_qic_call_function_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
}
void
smp_vic_cpi_interrupt(struct pt_regs *regs)
{
        __u8 cpu = smp_processor_id();

        if(is_cpu_quad())
                ack_QIC_CPI(VIC_CPI_LEVEL0);
        else
                ack_VIC_CPI(VIC_CPI_LEVEL0);

        if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
                wrapper_smp_local_timer_interrupt(regs);
        if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
                smp_reschedule_interrupt();
        if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
                smp_enable_irq_interrupt();
        if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
}
static void
do_flush_tlb_all(void* info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}
/* used to set up the trampoline for other CPUs when the memory manager
 * is sorted out */
void __init
smp_alloc_memory(void)
{
        trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
        if(__pa(trampoline_base) >= 0x93000)
                BUG();
}
/* send a reschedule CPI to one CPU by physical CPU number */
void
smp_send_reschedule(int cpu)
{
        send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
int
hard_smp_processor_id(void)
{
        __u8 i;
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return cpumask & 0x1F;

        for(i = 0; i < 8; i++) {
                if(cpumask & (1<<i))
                        return i;
        }
        printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
        return 0;
}
/* broadcast a halt to all other CPUs */
void
smp_send_stop(void)
{
        smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void
smp_vic_timer_interrupt(struct pt_regs *regs)
{
        send_CPI_allbutself(VIC_TIMER_CPI);
        smp_local_timer_interrupt(regs);
}
/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void
smp_local_timer_interrupt(struct pt_regs * regs)
{
        int cpu = smp_processor_id();
        long weight;

        profile_tick(CPU_PROFILING, regs);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
                if (per_cpu(prof_counter, cpu) !=
                    per_cpu(prof_old_multiplier, cpu)) {
                        /* FIXME: need to update the vic timer tick here */
                        per_cpu(prof_old_multiplier, cpu) =
                                per_cpu(prof_counter, cpu);
                }

                update_process_times(user_mode_vm(regs));
        }

        if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
                /* only extended VIC processors participate in
                 * interrupt distribution */
                return;

        /*
         * We take the 'long' return path, and there every subsystem
         * grabs the appropriate locks (kernel lock/ irq lock).
         *
         * we might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
         *
         * Currently this isn't too much of an issue (performance wise),
         * we can take more than 100K local irqs per second on a 100 MHz P5.
         */

        if((++vic_tick[cpu] & 0x7) != 0)
                return;
        /* get here every 16 ticks (about every 1/6 of a second) */

        /* Change our priority to give someone else a chance at getting
         * the IRQ. The algorithm goes like this:
         *
         * In the VIC, the dynamically routed interrupt is always
         * handled by the lowest priority eligible (i.e. receiving
         * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
         * lowest processor number gets it.
         *
         * The priority of a CPU is controlled by a special per-CPU
         * VIC priority register which is 3 bits wide 0 being lowest
         * and 7 highest priority.
         *
         * Therefore we subtract the average number of interrupts from
         * the number we've fielded.  If this number is negative, we
         * lower the activity count and if it is positive, we raise
         * it.
         *
         * I'm afraid this still leads to odd looking interrupt counts:
         * the totals are all roughly equal, but the individual ones
         * look rather skewed.
         *
         * FIXME: This algorithm is total crap when mixed with SMP
         * affinity code since we now try to even up the interrupt
         * counts when an affinity binding is keeping them on a
         * particular CPU */
        weight = (vic_intr_count[cpu]*voyager_extended_cpus
                  - vic_intr_total) >> 4;
        weight += 4;
        if(weight > 7)
                weight = 7;
        if(weight < 0)
                weight = 0;
        outb((__u8)weight, VIC_PRIORITY_REGISTER);
#ifdef VOYAGER_DEBUG
        if((vic_tick[cpu] & 0xFFF) == 0) {
                /* print this message roughly every 25 secs */
                printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
                       cpu, vic_tick[cpu], weight);
        }
#endif
}
/* setup the profiling timer */
int
setup_profiling_timer(unsigned int multiplier)
{
        int i;

        if (!multiplier)
                return -EINVAL;

        /*
         * Set the new multiplier for each CPU. CPUs don't start using the
         * new values until the next timer interrupt in which they do process
         * accounting.
         */
        for (i = 0; i < NR_CPUS; ++i)
                per_cpu(prof_multiplier, i) = multiplier;

        return 0;
}
/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_init */
#define VIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
void __init
smp_intr_init(void)
{
        int i;

        /* initialize the per cpu irq mask to all disabled */
        for(i = 0; i < NR_CPUS; i++)
                vic_irq_mask[i] = 0xFFFF;

        VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

        VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
        VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

        QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
        QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
        QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
        QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
        QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

        /* now put the VIC descriptor into the first 48 IRQs
         *
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for(i = 0; i < 48; i++)
                irq_desc[i].handler = &vic_irq_type;
}
/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void
send_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;
        __u32 quad_cpuset = (cpuset & voyager_quad_processors);

        if(cpi < VIC_START_FAKE_CPI) {
                /* fake CPI are only used for booting, so send to the
                 * extended quads as well---Quads must be VIC booted */
                outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
                return;
        }
        if(quad_cpuset)
                send_QIC_CPI(quad_cpuset, cpi);
        cpuset &= ~quad_cpuset;
        cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
        if(cpuset == 0)
                return;
        for_each_online_cpu(cpu) {
                if(cpuset & (1<<cpu))
                        set_bit(cpi, &vic_cpi_mailbox[cpu]);
        }
        if(cpuset)
                outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}
/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 */
static int
ack_QIC_CPI(__u8 cpi) {
        __u8 cpu = hard_smp_processor_id();

        cpi &= 7;

        outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
        return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}
static void
ack_special_QIC_CPI(__u8 cpi)
{
        switch(cpi) {
        case VIC_CMN_INT:
                outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
                break;
        case VIC_SYS_INT:
                outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
                break;
        }
        /* also clear at the VIC, just in case (nop for non-extended proc) */
        ack_VIC_CPI(cpi);
}
/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void
ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
        unsigned long flags;
        __u16 isr;
        __u8 cpu = smp_processor_id();

        local_irq_save(flags);
        isr = vic_read_isr();
        if((isr & (1<<(cpi &7))) == 0) {
                printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
        }
#endif
        /* send specific EOI; the two system interrupts have
         * bit 4 set for a separate vector but behave as the
         * corresponding 3 bit intr */
        outb_p(0x60|(cpi & 7),0x20);

#ifdef VOYAGER_DEBUG
        if((vic_read_isr() & (1<<(cpi &7))) != 0) {
                printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
        }
        local_irq_restore(flags);
#endif
}
/* cribbed with thanks from irq.c */
#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))
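/* Layout note (a reading of the code, not an original comment):
 * vic_irq_mask[cpu] mirrors the two 8259 mask registers, so
 * cached_21(cpu) is the low byte written back to the master PIC at
 * port 0x21 and cached_A1(cpu) the high byte for the slave at 0xA1. */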
static unsigned int
startup_vic_irq(unsigned int irq)
{
        enable_vic_irq(irq);

        return 0;
}
/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly. */
static void
enable_vic_irq(unsigned int irq)
{
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
        int cpu = smp_processor_id(), real_cpu;
        __u16 mask = (1<<irq);
        __u32 processorList = 0;
        unsigned long flags;

        VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
                if(!(voyager_extended_vic_processors & (1<<real_cpu)))
                        continue;
                if(!(cpu_irq_affinity[real_cpu] & mask)) {
                        /* irq has no affinity for this CPU, ignore */
                        continue;
                }
                if(real_cpu == cpu) {
                        enable_local_vic_irq(irq);
                }
                else if(vic_irq_mask[real_cpu] & mask) {
                        vic_irq_enable_mask[real_cpu] |= mask;
                        processorList |= (1<<real_cpu);
                }
        }
        spin_unlock_irqrestore(&vic_irq_lock, flags);
        if(processorList)
                send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}
static void
disable_vic_irq(unsigned int irq)
{
        /* lazy disable, do nothing */
}
static void
enable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = ~(1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        vic_irq_mask[cpu] &= mask;
        if(vic_irq_mask[cpu] == old_mask)
                return;

        VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu),0xA1);
                (void)inb_p(0xA1);
        }
        else {
                outb_p(cached_21(cpu),0x21);
                (void)inb_p(0x21);
        }
}
static void
disable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = (1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        if(irq == 7)
                return;

        vic_irq_mask[cpu] |= mask;
        if(old_mask == vic_irq_mask[cpu])
                return;

        VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu),0xA1);
                (void)inb_p(0xA1);
        }
        else {
                outb_p(cached_21(cpu),0x21);
                (void)inb_p(0x21);
        }
}
/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void
before_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        __u8 cpu = smp_processor_id();

        _raw_spin_lock(&vic_irq_lock);
        vic_intr_total++;
        vic_intr_count[cpu]++;

        if(!(cpu_irq_affinity[cpu] & (1<<irq))) {
                /* The irq is not in our affinity mask, push it off
                 * onto another CPU */
                VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
                        irq, cpu));
                disable_local_vic_irq(irq);
                /* set IRQ_INPROGRESS to prevent the handler in irq.c from
                 * actually calling the interrupt routine */
                desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
        } else if(desc->status & IRQ_DISABLED) {
                /* Damn, the interrupt actually arrived, do the lazy
                 * disable thing. The interrupt routine in irq.c will
                 * not handle a IRQ_DISABLED interrupt, so nothing more
                 * need be done here */
                VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
                        irq, cpu));
                disable_local_vic_irq(irq);
                desc->status |= IRQ_REPLAY;
        } else {
                desc->status &= ~IRQ_REPLAY;
        }

        _raw_spin_unlock(&vic_irq_lock);
}
/* Finish the VIC interrupt: basically mask */
static void
after_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        _raw_spin_lock(&vic_irq_lock);
        {
                unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
                __u16 isr;
#endif

                desc->status = status;
                if ((status & IRQ_DISABLED))
                        disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
                /* DEBUG: before we ack, check what's in progress */
                isr = vic_read_isr();
                if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) {
                        int i;
                        __u8 cpu = smp_processor_id();
                        __u8 real_cpu;
                        int mask; /* Um... initialize me??? --RR */

                        printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
                               cpu, irq);
                        for_each_possible_cpu(real_cpu, mask) {

                                outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
                                     VIC_PROCESSOR_ID);
                                isr = vic_read_isr();
                                if(isr & (1<<irq)) {
                                        printk("VOYAGER SMP: CPU%d ack irq %d\n",
                                               real_cpu, irq);
                                        ack_vic_irq(irq);
                                }
                                outb(cpu, VIC_PROCESSOR_ID);
                        }
                }
#endif /* VOYAGER_DEBUG */
                /* as soon as we ack, the interrupt is eligible for
                 * receipt by another CPU so everything must be in
                 * order here  */
                ack_vic_irq(irq);
                if(status & IRQ_REPLAY) {
                        /* replay is set if we disable the interrupt
                         * in the before_handle_vic_irq() routine, so
                         * clear the in progress bit here to allow the
                         * next CPU to handle this correctly */
                        desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
                }
#ifdef VOYAGER_DEBUG
                isr = vic_read_isr();
                if((isr & (1<<irq)) != 0)
                        printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
                               irq, isr);
#endif /* VOYAGER_DEBUG */
        }
        _raw_spin_unlock(&vic_irq_lock);

        /* All code after this point is out of the main path - the IRQ
         * may be intercepted by another CPU if reasserted */
}
/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */
static void
set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
        /* Only extended processors handle interrupts */
        unsigned long real_mask;
        unsigned long irq_mask = 1 << irq;
        int cpu;

        real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

        if(cpus_addr(mask)[0] == 0)
                /* can't have no cpu's to accept the interrupt -- extremely
                 * bad things will happen */
                return;

        if(irq == 0)
                /* can't change the affinity of the timer IRQ.  This
                 * is due to the constraint in the voyager
                 * architecture that the CPI also comes in on an IRQ
                 * line and we have chosen IRQ0 for this.  If you
                 * raise the mask on this interrupt, the processor
                 * will no-longer be able to accept VIC CPIs */
                return;

        if(irq >= 32)
                /* You can only have 32 interrupts in a voyager system
                 * (and 32 only if you have a secondary microchannel
                 * bus) */
                return;

        for_each_online_cpu(cpu) {
                unsigned long cpu_mask = 1 << cpu;

                if(cpu_mask & real_mask) {
                        /* enable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] |= irq_mask;
                } else {
                        /* disable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] &= ~irq_mask;
                }
        }
        /* this is magic, we now have the correct affinity maps, so
         * enable the interrupt.  This will send an enable CPI to
         * those cpu's who need to enable it in their local masks,
         * causing them to correct for the new affinity.  If the
         * interrupt is currently globally disabled, it will simply be
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
        enable_vic_irq(irq);
}
static void
ack_vic_irq(unsigned int irq)
{
        if (irq & 8) {
                outb(0x62,0x20);	/* Specific EOI to cascade */
                outb(0x60|(irq & 7),0xA0);
        } else {
                outb(0x60 | (irq & 7),0x20);
        }
}
/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static void
vic_enable_cpi(void)
{
        __u8 cpu = smp_processor_id();

        /* just take a copy of the current mask (nop for boot cpu) */
        vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

        enable_local_vic_irq(VIC_CPI_LEVEL0);
        enable_local_vic_irq(VIC_CPI_LEVEL1);
        /* for sys int and cmn int */
        enable_local_vic_irq(7);

        if(is_cpu_quad()) {
                outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
                outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
                VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
                        cpu, QIC_CPI_ENABLE));
        }

        VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
                cpu, vic_irq_mask[cpu]));
}
void
voyager_smp_dump()
{
        int old_cpu = smp_processor_id(), cpu;

        /* dump the interrupt masks of each processor */
        for_each_online_cpu(cpu) {
                __u16 imr, isr, irr;
                unsigned long flags;

                local_irq_save(flags);
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                imr = (inb(0xa1) << 8) | inb(0x21);
                outb(0x0a, 0xa0);
                irr = inb(0xa0) << 8;
                outb(0x0a, 0x20);
                irr |= inb(0x20);
                outb(0x0b, 0xa0);
                isr = inb(0xa0) << 8;
                outb(0x0b, 0x20);
                isr |= inb(0x20);
                outb(old_cpu, VIC_PROCESSOR_ID);
                local_irq_restore(flags);
                printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
                       cpu, vic_irq_mask[cpu], imr, irr, isr);
#if 0
                /* These lines are put in to try to unstick an un ack'd irq */
                if(isr != 0) {
                        int irq;
                        for(irq=0; irq<16; irq++) {
                                if(isr & (1<<irq)) {
                                        printk("\tCPU%d: ack irq %d\n",
                                               cpu, irq);
                                        local_irq_save(flags);
                                        outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
                                             VIC_PROCESSOR_ID);
                                        ack_vic_irq(irq);
                                        outb(old_cpu, VIC_PROCESSOR_ID);
                                        local_irq_restore(flags);
                                }
                        }
                }
#endif
        }
}
void
smp_voyager_power_off(void *dummy)
{
        if(smp_processor_id() == boot_cpu_id)
                voyager_power_off();
        else
                smp_stop_cpu_function(NULL);
}
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* FIXME: ignore max_cpus for now */
        smp_boot_cpus();
}
void __devinit smp_prepare_boot_cpu(void)
{
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
        cpu_set(smp_processor_id(), cpu_possible_map);
}
int __devinit
__cpu_up(unsigned int cpu)
{
        /* This only works at boot for x86.  See "rewrite" above. */
        if (cpu_isset(cpu, smp_commenced_mask))
                return -ENOSYS;

        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map))
                return -EIO;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map))
                mb();
        return 0;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
        zap_low_mappings();
}