/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

static atomic_t ipi_timer_latch[NR_CPUS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);
/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
static int __init vpe0tcs(char *str)
{
	get_option(&str, &vpe0limit);

	return 1;
}
static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}
static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}
static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1: case 0x3: case 0x7: case 0xf:
	case 0x1f: case 0x3f: case 0x7f: case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}
__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);
#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);
static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;
	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();
	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC);
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}
					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC);
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}
/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset.
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */
int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);
#endif

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}
/*
 * Common setup before any secondaries are started.
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind TC to VPE */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K ||
	    cpu_data[0].cputype == CPU_1004K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}
void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);
	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		atomic_set(&ipi_timer_latch[i], 0);
	}
	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;
	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb)
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	slop = ntc % nvpe;
	for (i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop) {
			if ((slop - i) > 0)
				tcpervpe[i]++;
		}
	}
	/* Handle command line override for VPE0 */
	if (vpe0limit > ntc)
		vpe0limit = ntc;
	if (vpe0limit > 0) {
		int slopslop;

		if (vpe0limit < tcpervpe[0]) {
			/* Reducing TC count - distribute to others */
			slop = tcpervpe[0] - vpe0limit;
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] += slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]++;
			}
		} else if (vpe0limit > tcpervpe[0]) {
			/* Increasing TC count - steal from others */
			slop = vpe0limit - tcpervpe[0];
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] -= slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]--;
			}
		}
	}
	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/* Set the MVP bit */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe[vpe]; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}
	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");
	/* Set up coprocessor affinity CPU mask(s) */

#ifdef CONFIG_MIPS_MT_FPAFF
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
#endif
	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}
	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_EVP);
	evpe(EVPE_ENABLE);
	emt(EMT_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	unsigned long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC. Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)){
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}
void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
void smtc_cpus_done(void)
{
}
/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}

void smtc_forward_irq(unsigned int irq)
{
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUS, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests. Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */

	target = first_cpu(irq_desc[irq].affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question. Otherwise we have a nasty problem with interrupt
	 * mask management. This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS) {
		do_IRQ_no_affinity(irq);
		return;
	}

	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */
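/* Debug helper: dump the head, tail and depth of each per-CPU IPI queue. */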
static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}
/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static inline int atomic_postincrement(atomic_t *v)
{
	unsigned long result;
	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "m" (v->counter)
	: "memory");

	return result;
}
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	unsigned long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		if (type == SMTC_CLOCK_TICK)
			atomic_inc(&ipi_timer_latch[cpu]);
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			if (type == SMTC_CLOCK_TICK)
				atomic_inc(&ipi_timer_latch[cpu]);
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);
//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}
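/*
 * Per-CPU clockevent device whose handler is invoked from SMTC_CLOCK_TICK
 * IPIs; it is defined elsewhere and declared here for use by ipi_decode().
 */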
DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
void ipi_decode(struct smtc_ipi *pipi)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int ticks;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
		cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
		ticks = atomic_read(&ipi_timer_latch[cpu]);
		atomic_sub(ticks, &ipi_timer_latch[cpu]);
		while (ticks) {
			cd->event_handler(cd);
			ticks--;
		}
		irq_exit();
		break;

	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	case IRQ_AFFINITY_IPI:
		/*
		 * Accept a "forwarded" interrupt that was initially
		 * taken by a TC who doesn't have affinity for the IRQ.
		 */
		do_IRQ_no_affinity((int)arg_copy);
		break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}
void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}
/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	unsigned long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent)
					smtc_ipi_req(&IPIQ[cpu], pipi);
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}
static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED | IRQF_PERCPU
};
static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 *
 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
 * called with interrupts disabled. We do rely on interrupts being disabled
 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
 * result in a recursive call to raw_local_irq_restore().
 */
static void __smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[cpu].depth > 0) {
		while (1) {
			struct smtc_ipi_q *q = &IPIQ[cpu];
			struct smtc_ipi *pipi;
			extern void self_ipi(struct smtc_ipi *);

			spin_lock(&q->lock);
			pipi = __smtc_ipi_dq(q);
			spin_unlock(&q->lock);
			if (!pipi)
				break;

			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}
void smtc_ipi_replay(void)
{
	raw_local_irq_disable();
	__smtc_ipi_replay();
}

EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	unsigned long flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}
	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}
	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, atomic_read(&ipi_timer_latch[tc]));
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	{
		unsigned long flags;

		local_irq_save(flags);
		__smtc_ipi_replay();
		local_irq_restore(flags);
	}
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
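/*
 * Dump SMTC soft-state counters: per-CPU timer interrupts, self-IPIs,
 * IPI queue state, timer IPI backlogs, and FPU recoveries.
 */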
void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}
/*
 * TLB management routines special to SMTC
 */
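/*
 * Allocate a fresh ASID for "mm" on "cpu", making sure not to reuse an
 * ASID that is still live in any TC sharing this TLB.
 */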
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for_each_online_cpu(i) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for_each_online_cpu(i) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}
/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at them.
 */
void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}
/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}