/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE SMP support routines.
 */
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
HV_Topology smp_topology __write_once;
EXPORT_SYMBOL(smp_topology);
#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif
/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */
/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;
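
/*
 * Send a hypervisor message carrying "tag" to a single cpu.  The
 * linear cpu number is converted to the recipient's (x,y) position
 * in the tile grid using smp_width, the width of the mesh.
 */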
void send_IPI_single(int cpu, int tag)
{
	HV_Recipient recip = {
		.y = cpu / smp_width,
		.x = cpu % smp_width,
		.state = HV_TO_BE_SENT
	};
	int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
	BUG_ON(rc <= 0);
}
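
/*
 * Send "tag" to every cpu in "mask" (which must not include the
 * sender).  hv_send_message() may deliver to only some of the
 * recipients on each call, so we retry until the reported count of
 * sent messages covers the whole recipient array.
 */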
void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int nrecip = 0;
	int cpu, sent;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r = &recip[nrecip++];
		BUG_ON(cpu == my_cpu);
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	sent = 0;
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc <= 0) {
			if (!stopping_cpus)	/* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		sent += rc;
	}
}
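
/* Send "tag" to all online cpus other than the current one. */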
void send_IPI_allbutself(int tag)
{
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	send_IPI_many(&mask, tag);
}
/*
 * Provide smp_call_function_mask, but also run the function locally
 * if the current cpu is in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
		      void *info, bool wait)
{
	int cpu = get_cpu();
	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
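
/*
 * Example (a sketch; "my_mask" and "my_func" are hypothetical):
 * run my_func on every cpu in my_mask and wait for completion; the
 * local cpu is handled by the direct call above rather than an IPI:
 *
 *	on_each_cpu_mask(&my_mask, my_func, NULL, 1);
 */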
/*
 * Functions related to starting/stopping cpus.
 */
/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
	get_irq_regs()->pc = start_cpu_function_addr;
}
/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	set_cpu_online(smp_processor_id(), 0);
	arch_local_irq_disable_all();
	for (;;)
		asm("nap");	/* low-power idle; never returns */
}
/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}
/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	switch (tag) {
	case MSG_TAG_START_CPU:		/* Start up a cpu */
		smp_start_cpu_interrupt();
		break;

	case MSG_TAG_STOP_CPU:		/* Sent to shut down slave CPUs */
		smp_stop_cpu_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_MANY:	/* Call function on cpumask */
		generic_smp_call_function_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_SINGLE:	/* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
		break;

	default:
		panic("Unknown IPI message tag %d", tag);
	}
}
/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
	unsigned long start;
	unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
	struct ipi_flush *flush = (struct ipi_flush *) info;
	__flush_icache_range(flush->start, flush->end);
}
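
/*
 * Each tile's icache is not kept coherent with stores from other
 * tiles, so after modifying code we run the flush on every cpu:
 * on_each_cpu() invokes ipi_flush_icache_range() locally and via
 * IPI on all the others.
 */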
void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };
	on_each_cpu(ipi_flush_icache_range, &flush, 1);
}
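
/*
 * Example (a sketch; "addr" and "len" are hypothetical): after
 * writing new instructions into kernel memory, make them visible
 * to instruction fetch on all cpus:
 *
 *	flush_icache_range(addr, addr + len);
 */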
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	/*
	 * Nothing to do here; when we return from interrupt, the
	 * rescheduling will occur there.  But do bump the interrupt
	 * profiler count in the meantime.
	 */
	__get_cpu_var(irq_stat).irq_resched_count++;

	return IRQ_HANDLED;
}
static struct irqaction resched_action = {
	.handler = handle_reschedule_ipi,
	.name = "resched",
	.dev_id = handle_reschedule_ipi /* unique token */,
};
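
/*
 * Boot-time IPI setup: map each cpu's IPI trigger page when the chip
 * supports direct IPIs, then register the reschedule IPI handler.
 */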
void __init ipi_init(void)
{
#if CHIP_HAS_IPI()
	int cpu;

	/* Map IPI trigger MMIO addresses. */
	for_each_possible_cpu(cpu) {
		HV_Coord tile;
		HV_PTE pte;
		unsigned long offset;

		tile.x = cpu_x(cpu);
		tile.y = cpu_y(cpu);
		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
			panic("Failed to initialize IPI for cpu %d\n", cpu);

		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
	}
#endif

	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}
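
/*
 * Trigger the scheduler IPI on another cpu.  With direct chip IPI
 * support this is a single MMIO store to that cpu's mapped IPI
 * trigger page; otherwise we ask the hypervisor to deliver it.
 */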
#if CHIP_HAS_IPI()

void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	/*
	 * We just want to do an MMIO store.  The traditional writeq()
	 * functions aren't really correct here, since they're always
	 * directed at the PCI shim.  For now, just do a raw store,
	 * casting away the __iomem attribute.
	 */
	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

void smp_send_reschedule(int cpu)
{
	HV_Coord coord;

	WARN_ON(cpu_is_offline(cpu));

	coord.y = cpu_y(cpu);
	coord.x = cpu_x(cpu);
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}
#endif /* CHIP_HAS_IPI() */