/*
 * [PATCH] cell: enable pause(0) in cpu_idle
 * (linux-2.6 history: arch/powerpc/platforms/cell/pervasive.c)
 */
/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
 *          Michael N. Day (mnday@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/reg.h>

#include "pervasive.h"
/* Serializes access to the per-CPU pervasive register state below. */
static DEFINE_SPINLOCK(cbe_pervasive_lock);

/*
 * Per-CPU handle on the CBE Pervasive Monitor and Debug (PMD) register
 * area, filled in by cbe_find_pmd_mmio() at boot.
 */
struct cbe_pervasive {
	struct pmd_regs __iomem *regs;	/* mapped PMD register area */
	unsigned int thread;		/* hardware thread id from the device tree */
};

/* can't use per_cpu from setup_arch */
static struct cbe_pervasive cbe_pervasive[NR_CPUS];
50 static void __init cbe_enable_pause_zero(void)
52 unsigned long thread_switch_control;
53 unsigned long temp_register;
54 struct cbe_pervasive *p;
55 int thread;
57 spin_lock_irq(&cbe_pervasive_lock);
58 p = &cbe_pervasive[smp_processor_id()];
60 if (!cbe_pervasive->regs)
61 goto out;
63 pr_debug("Power Management: CPU %d\n", smp_processor_id());
65 /* Enable Pause(0) control bit */
66 temp_register = in_be64(&p->regs->pm_control);
68 out_be64(&p->regs->pm_control,
69 temp_register|PMD_PAUSE_ZERO_CONTROL);
71 /* Enable DEC and EE interrupt request */
72 thread_switch_control = mfspr(SPRN_TSC_CELL);
73 thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
75 switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
76 case CTRL_CT0:
77 thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
78 thread = 0;
79 break;
80 case CTRL_CT1:
81 thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
82 thread = 1;
83 break;
84 default:
85 printk(KERN_WARNING "%s: unknown configuration\n",
86 __FUNCTION__);
87 thread = -1;
88 break;
91 if (p->thread != thread)
92 printk(KERN_WARNING "%s: device tree inconsistant, "
93 "cpu %i: %d/%d\n", __FUNCTION__,
94 smp_processor_id(),
95 p->thread, thread);
97 mtspr(SPRN_TSC_CELL, thread_switch_control);
99 out:
100 spin_unlock_irq(&cbe_pervasive_lock);
/*
 * Idle loop installed as ppc_md.idle_loop.  Drops the hardware thread
 * into the Pause(0) state; wakeups arrive as system reset exceptions
 * and are dispatched by cbe_system_reset_exception().
 * NOTE(review): the irq-disable / TE+runlatch-clear / preempt sequence
 * below is order-sensitive — do not reorder.
 */
static void cbe_idle(void)
{
	unsigned long ctrl;

	cbe_enable_pause_zero();

	while (1) {
		if (!need_resched()) {
			local_irq_disable();
			while (!need_resched()) {
				/* go into low thread priority */
				HMT_low();

				/*
				 * atomically disable thread execution
				 * and runlatch.
				 * External and Decrementer exceptions
				 * are still handled when the thread
				 * is disabled but now enter in
				 * cbe_system_reset_exception()
				 */
				ctrl = mfspr(SPRN_CTRLF);
				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
				mtspr(SPRN_CTRLT, ctrl);
			}
			/* restore thread prio */
			HMT_medium();
			local_irq_enable();
		}

		/*
		 * turn runlatch on again before scheduling the
		 * process we just woke up
		 */
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
145 int cbe_system_reset_exception(struct pt_regs *regs)
147 switch (regs->msr & SRR1_WAKEMASK) {
148 case SRR1_WAKEEE:
149 do_IRQ(regs);
150 break;
151 case SRR1_WAKEDEC:
152 timer_interrupt(regs);
153 break;
154 case SRR1_WAKEMT:
155 /* no action required */
156 break;
157 default:
158 /* do system reset */
159 return 0;
161 /* everything handled */
162 return 1;
165 static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
167 struct device_node *node;
168 unsigned int *int_servers;
169 char *addr;
170 unsigned long real_address;
171 unsigned int size;
173 struct pmd_regs __iomem *pmd_mmio_area;
174 int hardid, thread;
175 int proplen;
177 pmd_mmio_area = NULL;
178 hardid = get_hard_smp_processor_id(cpu);
179 for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
180 int_servers = (void *) get_property(node,
181 "ibm,ppc-interrupt-server#s", &proplen);
182 if (!int_servers) {
183 printk(KERN_WARNING "%s misses "
184 "ibm,ppc-interrupt-server#s property",
185 node->full_name);
186 continue;
188 for (thread = 0; thread < proplen / sizeof (int); thread++) {
189 if (hardid == int_servers[thread]) {
190 addr = get_property(node, "pervasive", NULL);
191 goto found;
196 printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
197 return -EINVAL;
199 found:
200 real_address = *(unsigned long*) addr;
201 addr += sizeof (unsigned long);
202 size = *(unsigned int*) addr;
204 pr_debug("pervasive area for CPU %d at %lx, size %x\n",
205 cpu, real_address, size);
206 p->regs = __ioremap(real_address, size, _PAGE_NO_CACHE);
207 p->thread = thread;
208 return 0;
211 void __init cell_pervasive_init(void)
213 struct cbe_pervasive *p;
214 int cpu;
215 int ret;
217 if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
218 return;
220 for_each_cpu(cpu) {
221 p = &cbe_pervasive[cpu];
222 ret = cbe_find_pmd_mmio(cpu, p);
223 if (ret)
224 return;
227 ppc_md.idle_loop = cbe_idle;
228 ppc_md.system_reset_exception = cbe_system_reset_exception;