/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2004 Mips Technologies, Inc
 * Copyright (C) 2008 Kevin D. Kissell
 */
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/smtc_proc.h>
/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
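/*
 * Note: the LOCK/UNLOCK pairs above assume the caller has declared local
 * "flags" and "mtflags" variables. The _MT_ pair inhibits multithreading
 * only on the local VPE (dmt/emt), while the _CORE_ pair stops all VPEs
 * on the core (dvpe/evpe).
 */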
/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;

/*
 * Number of FPU contexts for each VPE
 */

static int smtc_nconf1[MAX_SMTC_VPES];
/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status;

/* Boot command line configuration overrides */

static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned long smtc_asid_mask = 0xff;
static int __init vpe0tcs(char *str)
{
	get_option(&str, &vpe0limit);

	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}
155 __setup("vpe0tcs=", vpe0tcs
);
156 __setup("ipibufs=", ipibufs
);
157 __setup("nostlb", stlb_disable
);
158 __setup("asidmask=", asidmask_set
);
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);
static int imstuckcount[MAX_SMTC_VPES][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[MAX_SMTC_VPES][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
	    /* If we have multiple VPEs, try to share the TLB */
	    if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
		/*
		 * If TLB sizing is programmable, shared TLB
		 * size is the total available complement.
		 * Otherwise, we have to take the sum of all
		 * static VPE TLB entries.
		 */
		if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
				>> MVPCONF0_PTLBE_SHIFT)) == 0) {
		    /*
		     * If there's more than one VPE, there had better
		     * be more than one TC, because we need one to bind
		     * to each VPE in turn to be able to read
		     * its configuration state!
		     */
		    settc(1);
		    /* Stop the TC from doing anything foolish */
		    write_tc_c0_tchalt(TCHALT_H);
		    mips_ihb();
		    /* No need to un-Halt - that happens later anyway */
		    for (i = 0; i < vpes; i++) {
			write_tc_c0_tcbind(i);
			/*
			 * To be 100% sure we're really getting the right
			 * information, we exit the configuration state
			 * and do an IHB after each rebinding.
			 */
			write_c0_mvpcontrol(
				read_c0_mvpcontrol() & ~ MVPCONTROL_VPC);
			mips_ihb();
			/*
			 * Only count if the MMU Type indicated is TLB
			 */
			if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
				config1val = read_vpe_c0_config1();
				tlbsiz += ((config1val >> 25) & 0x3f) + 1;
			}
			/* Put core back in configuration state */
			write_c0_mvpcontrol(
				read_c0_mvpcontrol() | MVPCONTROL_VPC);
			mips_ihb();
		    }
		}
		write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);

		/*
		 * Setup kernel data structures to use software total,
		 * rather than read the per-VPE Config1 value. The values
		 * for "CPU 0" get copied to all the other CPUs as part
		 * of their initialization in smtc_cpu_setup().
		 */

		/* MIPS32 limits TLB indices to 64 */
		if (tlbsiz > 64)
			tlbsiz = 64;
		cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
		smtc_status |= SMTC_TLB_SHARED;
		local_flush_tlb_all();

		printk("TLB of %d entry pairs shared by %d VPEs\n",
			tlbsiz, vpes);
	    } else {
		printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
	    }
	}
}
/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * cpu_possible_map and the logical/physical mappings.
 */

int __init smtc_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		set_cpu_possible(i, true);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);
#endif
	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}
/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	static int cp1contexts[MAX_SMTC_VPES];

	/*
	 * Make a local copy of the available FPU contexts in order
	 * to keep track of TCs that can have one.
	 */
	if (tc == 1) {
		cp1contexts[0] = smtc_nconf1[0] - 1;
		cp1contexts[1] = smtc_nconf1[1];
	}

	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	/*
	 * TCContext gets an offset from the base of the IPIQ array
	 * to be used in low-level code to detect the presence of
	 * an active IPI queue.
	 */
	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);

	/* Bind TC to VPE. */
	write_tc_c0_tcbind(vpe);

	/* In general, all TCs should have the same cpu_data indications. */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));

	/* Check to see if there is a FPU context available for this TC. */
	if (!cp1contexts[vpe])
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	else
		cp1contexts[vpe]--;

	/* Store the TC and VPE into the cpu_data structure. */
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;

	/* Store the core number into the cpu_data structure. */
	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
/*
 * Tweak to get Count registers synced as closely as possible. The
 * value seems good for 34K-class cores.
 */

#define CP0_SKEW 8
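/*
 * CP0_SKEW is added when the boot TC copies its Count register to the
 * other VPEs in smtc_prepare_cpus() below
 * (write_vpe_c0_count(read_c0_count() + CP0_SKEW)), as a rough compensation
 * for the latency of the cross-VPE MTTR write. The constant is an empirical
 * tuning rather than an architectural figure.
 */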
void smtc_prepare_cpus(int cpus)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb)
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	slop = ntc % nvpe;
	for (i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop) {
			if ((slop - i) > 0)
				tcpervpe[i]++;
		}
	}
	/* Handle command line override for VPE0 */
	if (vpe0limit > ntc)
		vpe0limit = ntc;
	if (vpe0limit > 0) {
		int slopslop;

		if (vpe0limit < tcpervpe[0]) {
			/* Reducing TC count - distribute to others */
			slop = tcpervpe[0] - vpe0limit;
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] += slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1)) > 0))
					tcpervpe[i]++;
			}
		} else if (vpe0limit > tcpervpe[0]) {
			/* Increasing TC count - steal from others */
			slop = vpe0limit - tcpervpe[0];
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] -= slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1)) > 0))
					tcpervpe[i]--;
			}
		}
	}

	/* Set up shared TLB */
	smtc_configure_tlb();
	for (tc = 0, vpe = 0; (vpe < nvpe) && (tc < ntc); vpe++) {
		/* Get number of CP1 contexts for each VPE. */
		if (tc == 0) {
			/*
			 * Do not call settc() for TC0 or the FPU context
			 * value will be incorrect. Besides, we know that
			 * we are TC0 anyway.
			 */
			smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
				VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
			if (nvpe == 2) {
				settc(1);
				smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
					VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
				settc(0);
			}
		}
		if (tcpervpe[vpe] == 0)
			continue;
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe[vpe]; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				if (vpe != 0) {
					/*
					 * Set MVP bit (possibly again). Do it
					 * here to catch CPUs that have no TCs
					 * bound to the VPE at reset. In that
					 * case, a TC must be bound to the VPE
					 * before we can set VPEControl[MVP]
					 */
					write_vpe_c0_vpeconf0(
						read_vpe_c0_vpeconf0() |
						VPECONF0_MVP);
				}
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (vpe != 0) {
			/*
			 * Allow this VPE to control others.
			 */
			write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
					      VPECONF0_MVP);

			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		set_cpu_possible(tc, false);
		set_cpu_present(tc, false);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
	/* Set up coprocessor affinity CPU mask(s) */
#ifdef CONFIG_MIPS_MT_FPAFF
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
#endif

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	unsigned long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}
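/*
 * The sequence above programs the halted target TC through the MT ASE
 * MTTR-based accessors: write_tc_c0_tcrestart() supplies the new PC,
 * write_tc_gpr_sp()/write_tc_gpr_gp() seed the stack and global pointers,
 * and the final write_tc_c0_tchalt(0) releases the TC to start executing
 * at smp_bootstrap.
 */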
void smtc_init_secondary(void)
{
}

void smtc_smp_finish(void)
{
	int cpu = smp_processor_id();

	/*
	 * Lowest-numbered CPU per VPE starts a clock tick.
	 * Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);

	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}
/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}

void smtc_forward_irq(unsigned int irq)
{
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUs, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests. Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */

	target = cpumask_first(irq_desc[irq].affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question. Otherwise we have a nasty problem with interrupt
	 * mask management. This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS) {
		do_IRQ_no_affinity(irq);
		return;
	}

	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */
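/*
 * Concretely, in smtc_send_ipi() below: a target on a different VPE gets
 * its message queued on IPIQ[cpu] and a SW1 software interrupt raised in
 * that VPE's Cause register; a target on the same VPE is halted and, if
 * its IXMT bit is clear, given a "direct" IPI via post_direct_ipi(), which
 * builds an exception frame and redirects TCRestart; otherwise the message
 * is queued for the target to pick up on its next local_irq_restore().
 */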
static void smtc_ipi_qdump(void)
{
	int i;
	struct smtc_ipi *temp;

	for (i = 0; i < NR_CPUS; i++) {
		pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
		temp = IPIQ[i].head;

		while (temp != IPIQ[i].tail) {
			pr_debug("%d %d %d: ", temp->type, temp->dest,
				 (int)temp->arg);
#ifdef SMTC_IPI_DEBUG
			pr_debug("%u %lu\n", temp->sender, temp->stamp);
#else
			pr_debug("\n");
#endif
			temp = temp->flink;
		}
	}
}
/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static inline int atomic_postincrement(atomic_t *v)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"
	"	addu	%1, %0, 1	\n"
	"	sc	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "m" (v->counter)
	: "memory");

	return result;
}
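/*
 * atomic_postincrement(v) is thus a fetch-and-add of 1: it returns the
 * value observed *before* the increment, equivalent in effect to
 * atomic_add_return(1, v) - 1 but without the extra arithmetic.
 */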
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	unsigned long tcstatus;
	struct smtc_ipi *pipi;
	unsigned long flags;
	int mtflags;
	unsigned long tcrestart;
	extern void r4k_wait_irqoff(void), __pastwait(void);
	int set_resched_flag = (type == LINUX_SMP_IPI &&
				action == SMP_RESCHEDULE_YOURSELF);

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
		return; /* There is a reschedule queued already */

	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		IPIQ[cpu].resched_flag |= set_resched_flag;
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC.
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * If we're in the irq-off version of the wait
			 * loop, we need to force exit from the wait and
			 * do a direct post of the IPI.
			 */
			if (cpu_wait == r4k_wait_irqoff) {
				tcrestart = read_tc_c0_tcrestart();
				if (tcrestart >= (unsigned long)r4k_wait_irqoff
				    && tcrestart < (unsigned long)__pastwait) {
					write_tc_c0_tcrestart(__pastwait);
					tcstatus &= ~TCSTATUS_IXMT;
					write_tc_c0_tcstatus(tcstatus);
					goto postdirect;
				}
			}
			/*
			 * Otherwise we queue the message for the target TC
			 * to pick up when he does a local_irq_restore()
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			IPIQ[cpu].resched_flag |= set_resched_flag;
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
postdirect:
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);

	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
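/*
 * A note on the direct-IPI frame built above: the target TC resumes at
 * __smtc_ipi_vector (implemented in low-level assembly elsewhere), which
 * is expected to pick up the smtc_ipi pointer and the ipi_decode address
 * from the pad0[4]/pad0[5] slots of the pt_regs frame constructed here and
 * then dispatch the message as if a real interrupt had been taken.
 */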
static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
static void __irq_entry smtc_clock_tick_interrupt(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	int irq = MIPS_CPU_IRQ_BASE + 1;

	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
	cd = &per_cpu(mips_clockevent_device, cpu);
	cd->event_handler(cd);
}
void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;

	smtc_ipi_nq(&freeIPIq, pipi);

	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		smtc_clock_tick_interrupt();
		break;

	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument %p\n", arg_copy);
			break;
		}
		break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	case IRQ_AFFINITY_IPI:
		/*
		 * Accept a "forwarded" interrupt that was initially
		 * taken by a TC who doesn't have affinity for the IRQ.
		 */
		do_IRQ_no_affinity((int)arg_copy);
		break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}
/*
 * Similar to smtc_ipi_replay(), but invoked from context restore,
 * so it reuses the current exception frame rather than set up a
 * new one with self_ipi.
 */

void deferred_smtc_ipi(void)
{
	int cpu = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 * If irq_disabled when this was called, then any IPIs queued
	 * after we test last will be taken on the next irq_enable/restore.
	 * If interrupts were enabled, then any IPIs added after the
	 * last test will be taken directly.
	 */

	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It may be possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);
		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		if (pipi != NULL) {
			if (pipi->type == LINUX_SMP_IPI &&
			    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
				IPIQ[cpu].resched_flag = 0;
			ipi_decode(pipi);
		}
		/*
		 * The use of the __raw_local restore isn't
		 * as obviously necessary here as in smtc_ipi_replay(),
		 * but it's more efficient, given that we're already
		 * running down the IPI queue.
		 */
		__raw_local_irq_restore(flags);
	}
}
/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	unsigned long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent)
					smtc_ipi_req(&IPIQ[cpu], pipi);
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				if (pipi->type == LINUX_SMP_IPI &&
				    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
					IPIQ[cpu].resched_flag = 0;
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED | IRQF_PERCPU,
	.name		= "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

/*
 * smtc_ipi_replay is called from raw_local_irq_restore
 */

void smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * We should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt. The important thing
	 * is to do the last check for queued message *after* the
	 * re-enabling of interrupts.
	 */
	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It's just possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);

		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		/*
		 * But use a raw restore here to avoid recursion.
		 */
		__raw_local_irq_restore(flags);

		if (pipi) {
			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	unsigned long flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
}
void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}
/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even so most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for_each_online_cpu(i) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for_each_online_cpu(i) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}
/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		tlb_read();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
}
/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}