arch/powerpc/platforms/cell/spu_base.c

/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

struct spu_slb {
	u64 esid, vsid;
};

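/*
 * Invalidate all SLB entries on this SPU. The invalidate is only issued
 * while MFC translation is enabled (SR1 relocate bit set); with
 * translation off the SLB is not in use, so there is nothing to flush.
 */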
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm.
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 *
 * Setting every bit in the mm's cpu_vm_mask forces the TLB flush code
 * to use the broadcast tlbie instruction, so that translations cached
 * by the SPEs are invalidated along with those of the CPUs.
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);

	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

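/*
 * Handle an SLB miss raised by the SPU: build an SLB entry for the
 * faulting effective address, install it in the next round-robin
 * replacement slot, then restart the stalled DMA.
 */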
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __func__);

	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); /* XXX */

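/*
 * Handle a data storage (hash) fault from the SPU. Kernel-space faults
 * are resolved right here in interrupt context; user-space faults are
 * recorded in the spu and passed to the context's stop callback, so
 * that spufs can resolve them later in process context.
 */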
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	spu->class_0_pending = 0;
	spu->dar = ea;
	spu->dsisr = dsisr;

	spu->stop_callback(spu);

	return 0;
}

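/*
 * Build a kernel-space SLB entry for @addr, using the linear-mapping
 * page size for addresses in the kernel region and the virtual page
 * size otherwise.
 */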
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct spu_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1,
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

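/*
 * Class 0 interrupts report SPU and MFC error conditions (e.g. DMA
 * errors). Latch the status into class_0_pending along with the fault
 * registers, and let the context's stop callback sort it out.
 */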
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->dsisr = spu_mfc_dsisr_get(spu);
	spu->dar = spu_mfc_dar_get(spu);
	spin_unlock(&spu->register_lock);

	spu->stop_callback(spu);

	spu_int_stat_clear(spu, 0, stat);

	return IRQ_HANDLED;
}

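/*
 * Class 1 interrupts report MFC translation faults: segment misses are
 * fixed up directly under the register lock, while storage (hash)
 * faults are passed on to __spu_trap_data_map().
 */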
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		;

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

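/*
 * Class 2 interrupts carry SPU-side events: mailbox traffic, stop and
 * signal, halt, and DMA tag-group completion. Each event is dispatched
 * to the callback registered by the owning context.
 */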
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;

	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

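/*
 * Request the three per-SPU interrupt lines. Any line may be absent
 * (NO_IRQ); on failure, release the lines acquired so far.
 */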
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof(spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof(spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof(spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

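/*
 * Bring the SPU channels into a known state: zero the data of the
 * channels in zero_list, then program the channel counts from
 * count_list, both through the priv2 channel-access registers.
 */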
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	.name = "spu",
	.shutdown = spu_shutdown,
};

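/*
 * The sysdev attribute helpers below walk spu_full_list under
 * spu_full_list_mutex, adding or removing the given attribute or group
 * on every SPU's sysdev.
 */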
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->sysdev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

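/*
 * Instantiate one SPU: allocate and number it, let the platform code
 * set it up, hook up interrupts and sysfs, and put it on the per-node
 * and global lists. Called once per SPE found during enumeration.
 */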
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof(*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

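/*
 * Module init: register the sysdev class, enumerate all SPEs through
 * the platform management ops, and hook the resulting list up to xmon,
 * the crash-dump code, and the affinity code.
 */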
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because it causes section type
		 * conflicts between const and __initdata with different
		 * compiler versions.
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);
	spu_init_affinity();

	return 0;

out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");