/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */

#define MIPS_CPU_IPI_IRQ	1

#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
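/*
 * NB: the LOCK/UNLOCK macros above expand in place and expect the
 * caller to provide "flags" and "mtflags" local variables.
 */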
/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
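/*
 * Each entry is used as a per-CPU bit vector: bit n is set while
 * TC/CPU n still holds the ASID live (see smtc_get_new_mmu_context()).
 */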
/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;
/* Boot command line configuration overrides */

static int vpelimit = 0;
static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);
	return 1;
}

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);
	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}
140 __setup("maxvpes=", maxvpes
);
141 __setup("maxtcs=", maxtcs
);
142 __setup("ipibufs=", ipibufs
);
143 __setup("nostlb", stlb_disable
);
144 __setup("asidmask=", asidmask_set
);
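/*
 * Example (illustrative values, not from any particular board):
 * booting with "maxvpes=1 maxtcs=2 ipibufs=64 asidmask=0x3f" would
 * restrict SMTC to two TCs on a single VPE, enlarge the IPI buffer
 * pool, and shrink the effective ASID space.
 */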
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
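/*
 * A TC whose backlog of latched-but-undelivered timer IPIs exceeds
 * timerq_limit is reported as hung by smtc_idle_loop_hook() below.
 */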
static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}
/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC );
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}
/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */
int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}
/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}
void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb)
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/* Skip VPEs that will receive no TCs at all */
		if (tcpervpe == 0 && slop == 0)
			continue;
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Allow this VPE to control others.
			 */
			write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
					VPECONF0_MVP);

			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	if (nvpe > 1)
		setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC. Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)){
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}
void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in irq_hwmask.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */
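/*
 * In short: (same VPE, target not IXMT) - halt the TC and inject via
 * post_direct_ipi(); (same VPE, target IXMT) - queue on IPIQ[] for
 * deferred_smtc_ipi(); (different VPE) - queue and assert SW1 so that
 * ipi_interrupt() on the target VPE completes delivery.
 */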
static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}
/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;
	unsigned long temp;

	/* LL/SC loop: returns the value of *pv before the increment */
	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"
	"	addu	%1, %0, 1	\n"
	"	sc	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	"	sync			\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
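/*
 * atomic_postincrement() is used below on ipi_timer_latch[] so that,
 * when the target is interrupt-exempt, only the tick that takes the
 * latch from 0 to 1 actually enqueues a clock-tick message; later
 * ticks just bump the latch count.
 */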
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
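/*
 * __smtc_ipi_vector lives in the low-level exception code: it resumes
 * on the target TC with IXMT set, finishes building the kernel-mode
 * context at "kstack", and calls the function whose address was stashed
 * in pad0[5] (ipi_decode) with the descriptor from pad0[4] as argument.
 */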
static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}


static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		irq_exit();
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}
void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}
/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(int vpe)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}
/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell"
 * hardware interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					/* Requeue at head for another try */
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi;

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	irq_ipi.handler = ipi_interrupt;
	irq_ipi.flags = IRQF_DISABLED;
	irq_ipi.name = "SMTC_IPI";

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 *
 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
 * called with interrupts disabled. We do rely on interrupts being disabled
 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
 * result in a recursive call to raw_local_irq_restore().
 */

static void __smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK:  If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[cpu].depth > 0) {
		while (1) {
			struct smtc_ipi_q *q = &IPIQ[cpu];
			struct smtc_ipi *pipi;
			extern void self_ipi(struct smtc_ipi *);

			spin_lock(&q->lock);
			pipi = __smtc_ipi_dq(q);
			spin_unlock(&q->lock);
			if (!pipi)
				break;

			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}
void smtc_ipi_replay(void)
{
	raw_local_irq_disable();
	__smtc_ipi_replay();
}

EXPORT_SYMBOL(smtc_ipi_replay);
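/*
 * With CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY, the irqflags code invokes
 * smtc_ipi_replay() whenever interrupts are restored, so deferred IPIs
 * are drained immediately; otherwise they are drained from the idle
 * loop hook below.
 */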
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	unsigned long flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = cpu_data[smp_processor_id()].vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	{
		unsigned long flags;

		local_irq_save(flags);
		__smtc_ipi_replay();
		local_irq_restore(flags);
	}
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}
/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}
/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at them.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}