[PATCH] spufs: fix mailbox polling
arch/powerpc/platforms/cell/spu_base.c
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"
static int __spu_trap_invalid_dma(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        force_sig(SIGBUS, /* info, */ current);
        return 0;
}
static int __spu_trap_dma_align(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        force_sig(SIGBUS, /* info, */ current);
        return 0;
}
static int __spu_trap_error(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        force_sig(SIGILL, /* info, */ current);
        return 0;
}
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
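/*
 * Class 1 segment fault: the MFC raised an SLB miss for an effective
 * address used by a DMA transfer.  Load a new SLB entry for the faulting
 * address, rotating round-robin through the eight SLB slots, then let the
 * suspended DMA continue.  Only user mappings of the owning mm are
 * resolved here; kernel-region accesses are rejected.
 */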
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid;

        pr_debug("%s\n", __FUNCTION__);

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
                /* SLBs are pre-loaded for context switch, so
                 * we should never get here!
                 */
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
        if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }

        esid = (ea & ESID_MASK) | SLB_ESID_V;
        vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
        if (in_hugepage_area(mm->context, ea))
                vsid |= SLB_VSID_L;

        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid);
        out_be64(&priv2->slb_esid_RW, esid);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);

        return 0;
}
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s\n", __FUNCTION__);

        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }

        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
        if (spu->stop_callback)
                spu->stop_callback(spu);
        return 0;
}
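/*
 * Class 2 mailbox events.  When the SPU signals that mailbox data is
 * available, notify spufs through the registered callback and then mask
 * the corresponding interrupt bit so it does not fire again while the
 * event is still pending; the consumer (presumably the spufs poll/read
 * path) is expected to re-enable the bit once the mailbox is serviced.
 */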
static int __spu_trap_mailbox(struct spu *spu)
{
        if (spu->ibox_callback)
                spu->ibox_callback(spu);

        /* atomically disable SPU mailbox interrupts */
        spin_lock(&spu->register_lock);
        out_be64(&spu->priv1->int_mask_class2_RW,
                 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
        spin_unlock(&spu->register_lock);
        return 0;
}
static int __spu_trap_stop(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->stop_code = in_be32(&spu->problem->spu_status_R);
        if (spu->stop_callback)
                spu->stop_callback(spu);
        return 0;
}
static int __spu_trap_halt(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->stop_code = in_be32(&spu->problem->spu_status_R);
        if (spu->stop_callback)
                spu->stop_callback(spu);
        return 0;
}
static int __spu_trap_tag_group(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        /* wake_up(&spu->dma_wq); */
        return 0;
}
static int __spu_trap_spubox(struct spu *spu)
{
        if (spu->wbox_callback)
                spu->wbox_callback(spu);

        /* atomically disable SPU mailbox interrupts */
        spin_lock(&spu->register_lock);
        out_be64(&spu->priv1->int_mask_class2_RW,
                 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
        spin_unlock(&spu->register_lock);
        return 0;
}
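/*
 * Interrupt entry points.  Each SPE exposes three interrupt classes:
 * class 0 for errors (invalid MFC DMA, DMA alignment, SPU error), class 1
 * for address-translation faults raised by the MFC, and class 2 for
 * mailbox, stop-and-signal, halt and DMA tag-group completion events.
 */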
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
        struct spu *spu;

        spu = data;
        spu->class_0_pending = 1;
        if (spu->stop_callback)
                spu->stop_callback(spu);

        return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
        unsigned long stat, mask;

        spu->class_0_pending = 0;

        mask = in_be64(&spu->priv1->int_mask_class0_RW);
        stat = in_be64(&spu->priv1->int_stat_class0_RW);

        stat &= mask;

        if (stat & 1) /* invalid MFC DMA */
                __spu_trap_invalid_dma(spu);

        if (stat & 2) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);

        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);

        out_be64(&spu->priv1->int_stat_class0_RW, stat);

        return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = in_be64(&spu->priv1->int_mask_class1_RW);
        stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
        dar = in_be64(&spu->priv1->mfc_dar_RW);
        dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
        out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
        out_be64(&spu->priv1->int_stat_class1_RW, stat);
        spin_unlock(&spu->register_lock);

        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);

        if (stat & 2) { /* mapping fault */
                __spu_trap_data_map(spu, dar, dsisr);
        }

        if (stat & 4) /* ls compare & suspend on get */
                ;

        if (stat & 8) /* ls compare & suspend on put */
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;

        spu = data;
        stat = in_be64(&spu->priv1->int_stat_class2_RW);
        mask = in_be64(&spu->priv1->int_mask_class2_RW);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        stat &= mask;

        if (stat & 1) /* PPC core mailbox */
                __spu_trap_mailbox(spu);

        if (stat & 2) /* SPU stop-and-signal */
                __spu_trap_stop(spu);

        if (stat & 4) /* SPU halted */
                __spu_trap_halt(spu);

        if (stat & 8) /* DMA tag group complete */
                __spu_trap_tag_group(spu);

        if (stat & 0x10) /* SPU mailbox threshold */
                __spu_trap_spubox(spu);

        out_be64(&spu->priv1->int_stat_class2_RW, stat);
        return stat ? IRQ_HANDLED : IRQ_NONE;
}
static int
spu_request_irqs(struct spu *spu)
{
        int ret;
        int irq_base;

        irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

        snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
        ret = request_irq(irq_base + spu->isrc,
                          spu_irq_class_0, 0, spu->irq_c0, spu);
        if (ret)
                goto out;
        out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

        snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
        ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
                          spu_irq_class_1, 0, spu->irq_c1, spu);
        if (ret)
                goto out1;
        out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

        snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
        ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
                          spu_irq_class_2, 0, spu->irq_c2, spu);
        if (ret)
                goto out2;
        out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
        goto out;

out2:
        free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
        free_irq(irq_base + spu->isrc, spu);
out:
        return ret;
}
static void
spu_free_irqs(struct spu *spu)
{
        int irq_base;

        irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

        free_irq(irq_base + spu->isrc, spu);
        free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
        free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}
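/*
 * SPUs that are not currently assigned to a context sit on spu_list,
 * protected by the spu_mutex semaphore; spu_alloc() takes one off the
 * list and spu_free() returns it.
 */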
static LIST_HEAD(spu_list);
static DECLARE_MUTEX(spu_mutex);
static void spu_init_channels(struct spu *spu)
{
        static const struct {
                 unsigned channel;
                 unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
static void spu_init_regs(struct spu *spu)
{
        out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
        out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
        out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}
struct spu *spu_alloc(void)
{
        struct spu *spu;

        down(&spu_mutex);
        if (!list_empty(&spu_list)) {
                spu = list_entry(spu_list.next, struct spu, list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
        } else {
                pr_debug("No SPU left\n");
                spu = NULL;
        }
        up(&spu_mutex);

        if (spu) {
                spu_init_channels(spu);
                spu_init_regs(spu);
        }

        return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);
void spu_free(struct spu *spu)
{
        down(&spu_mutex);
        list_add_tail(&spu->list, &spu_list);
        up(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
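/*
 * Resolve an MFC page fault against the owning process's mm.  This mirrors
 * the CPU page-fault path: find the VMA for the faulting address, check the
 * access permissions against the DSISR bits, and call handle_mm_fault() to
 * bring the page in.
 */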
static int spu_handle_mm_fault(struct spu *spu)
{
        struct mm_struct *mm = spu->mm;
        struct vm_area_struct *vma;
        u64 ea, dsisr, is_write;
        int ret;

        ea = spu->dar;
        dsisr = spu->dsisr;
#if 0
        if (!IS_VALID_EA(ea)) {
                return -EFAULT;
        }
#endif /* XXX */
        if (mm == NULL) {
                return -EFAULT;
        }
        if (mm->pgd == NULL) {
                return -EFAULT;
        }

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, ea);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= ea)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
#if 0
        if (expand_stack(vma, ea))
                goto bad_area;
#endif /* XXX */
good_area:
        is_write = dsisr & MFC_DSISR_ACCESS_PUT;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (dsisr & MFC_DSISR_ACCESS_DENIED)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        ret = 0;
        switch (handle_mm_fault(mm, vma, ea, is_write)) {
        case VM_FAULT_MINOR:
                current->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                current->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                ret = -EFAULT;
                goto bad_area;
        case VM_FAULT_OOM:
                ret = -ENOMEM;
                goto bad_area;
        default:
                BUG();
        }
        up_read(&mm->mmap_sem);
        return ret;

bad_area:
        up_read(&mm->mmap_sem);
        return -EFAULT;
}
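/*
 * Bottom half of the class 1 interrupt, run from process context.  First
 * try to resolve a missing hash-table entry with hash_page(); if that
 * fails, or the access was denied, fall back to the full mm fault path
 * above.  On success the suspended DMA is restarted, otherwise the
 * transfer is treated as an invalid DMA.
 */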
int spu_irq_class_1_bottom(struct spu *spu)
{
        u64 ea, dsisr, access, error = 0UL;
        int ret = 0;

        ea = spu->dar;
        dsisr = spu->dsisr;
        if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
                access = (_PAGE_PRESENT | _PAGE_USER);
                access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
                if (hash_page(ea, access, 0x300) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
        }
        if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
            (dsisr & MFC_DSISR_ACCESS_DENIED)) {
                if ((ret = spu_handle_mm_fault(spu)) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
                else
                        error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
        }
        spu->dar = 0UL;
        spu->dsisr = 0UL;
        if (!error) {
                spu_restart_dma(spu);
        } else {
                __spu_trap_invalid_dma(spu);
        }
        return ret;
}
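/*
 * Map one of the SPE's register areas described in the device tree.  The
 * named property holds a packed {physical address, length} pair that is
 * handed to ioremap().
 */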
static void __iomem * __init map_spe_prop(struct device_node *n,
                const char *name)
{
        struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *prop;

        void *p;
        int proplen;

        p = get_property(n, name, &proplen);
        if (proplen != sizeof (struct address_prop))
                return NULL;

        prop = p;

        return ioremap(prop->address, prop->len);
}
static void spu_unmap(struct spu *spu)
{
        iounmap(spu->priv2);
        iounmap(spu->priv1);
        iounmap(spu->problem);
        iounmap((u8 __iomem *)spu->local_store);
}
static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
        char *prop;
        int ret;

        ret = -ENODEV;
        prop = get_property(spe, "isrc", NULL);
        if (!prop)
                goto out;
        spu->isrc = *(unsigned int *)prop;

        spu->name = get_property(spe, "name", NULL);
        if (!spu->name)
                goto out;

        prop = get_property(spe, "local-store", NULL);
        if (!prop)
                goto out;
        spu->local_store_phys = *(unsigned long *)prop;

        /* we use local store as ram, not io memory */
        spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
        if (!spu->local_store)
                goto out;

        spu->problem = map_spe_prop(spe, "problem");
        if (!spu->problem)
                goto out_unmap;

        spu->priv1 = map_spe_prop(spe, "priv1");
        if (!spu->priv1)
                goto out_unmap;

        spu->priv2 = map_spe_prop(spe, "priv2");
        if (!spu->priv2)
                goto out_unmap;
        ret = 0;
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}
static int __init find_spu_node_id(struct device_node *spe)
{
        unsigned int *id;
        struct device_node *cpu;

        cpu = spe->parent->parent;
        id = (unsigned int *)get_property(cpu, "node-id", NULL);

        return id ? *id : 0;
}
static int __init create_spu(struct device_node *spe)
{
        struct spu *spu;
        int ret;
        static int number;

        ret = -ENOMEM;
        spu = kmalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        ret = spu_map_device(spu, spe);
        if (ret)
                goto out_free;

        spu->node = find_spu_node_id(spe);
        spu->stop_code = 0;
        spu->slb_replace = 0;
        spu->mm = NULL;
        spu->ctx = NULL;
        spu->rq = NULL;
        spu->pid = 0;
        spu->class_0_pending = 0;
        spu->flags = 0UL;
        spu->dar = 0UL;
        spu->dsisr = 0UL;
        spin_lock_init(&spu->register_lock);

        out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
        out_be64(&spu->priv1->mfc_sr1_RW, 0x33);

        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;

        down(&spu_mutex);
        spu->number = number++;
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_unmap;

        list_add(&spu->list, &spu_list);
        up(&spu_mutex);

        pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
                spu->name, spu->isrc, spu->local_store,
                spu->problem, spu->priv1, spu->priv2, spu->number);
        goto out;

out_unmap:
        up(&spu_mutex);
        spu_unmap(spu);
out_free:
        kfree(spu);
out:
        return ret;
}
static void destroy_spu(struct spu *spu)
{
        list_del_init(&spu->list);

        spu_free_irqs(spu);
        spu_unmap(spu);
        kfree(spu);
}
static void cleanup_spu_base(void)
{
        struct spu *spu, *tmp;
        down(&spu_mutex);
        list_for_each_entry_safe(spu, tmp, &spu_list, list)
                destroy_spu(spu);
        up(&spu_mutex);
}
module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
        struct device_node *node;
        int ret;

        ret = -ENODEV;
        for (node = of_find_node_by_type(NULL, "spe");
                        node; node = of_find_node_by_type(node, "spe")) {
                ret = create_spu(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                                __FUNCTION__, node->name);
                        cleanup_spu_base();
                        break;
                }
        }
        /* in some old firmware versions, the spe is called 'spc', so we
           look for that as well */
        for (node = of_find_node_by_type(NULL, "spc");
                        node; node = of_find_node_by_type(node, "spc")) {
                ret = create_spu(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                                __FUNCTION__, node->name);
                        cleanup_spu_base();
                        break;
                }
        }
        return ret;
}
module_init(init_spu_base);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");