[PATCH] spufs: clear dsisr on CLASS1[Mf] exception
[linux-2.6.git] arch/powerpc/platforms/cell/spu_base.c
blob 3a5302151e095137a22dd72ec6a3b0248756d377
1 /*
2 * Low-level SPU handling
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #undef DEBUG
25 #include <linux/interrupt.h>
26 #include <linux/list.h>
27 #include <linux/module.h>
28 #include <linux/poll.h>
29 #include <linux/ptrace.h>
30 #include <linux/slab.h>
31 #include <linux/wait.h>
33 #include <asm/io.h>
34 #include <asm/prom.h>
35 #include <asm/semaphore.h>
36 #include <asm/spu.h>
37 #include <asm/mmu_context.h>
39 #include "interrupt.h"
41 static int __spu_trap_invalid_dma(struct spu *spu)
43 pr_debug("%s\n", __FUNCTION__);
44 force_sig(SIGBUS, /* info, */ current);
45 return 0;
48 static int __spu_trap_dma_align(struct spu *spu)
50 pr_debug("%s\n", __FUNCTION__);
51 force_sig(SIGBUS, /* info, */ current);
52 return 0;
55 static int __spu_trap_error(struct spu *spu)
57 pr_debug("%s\n", __FUNCTION__);
58 force_sig(SIGILL, /* info, */ current);
59 return 0;
62 static void spu_restart_dma(struct spu *spu)
64 struct spu_priv2 __iomem *priv2 = spu->priv2;
66 if (!test_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags))
67 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
70 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
72 struct spu_priv2 __iomem *priv2 = spu->priv2;
73 struct mm_struct *mm = spu->mm;
74 u64 esid, vsid;
76 pr_debug("%s\n", __FUNCTION__);
78 if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
79 /* SLBs are pre-loaded for context switch, so
80 * we should never get here!
82 printk("%s: invalid access during switch!\n", __func__);
83 return 1;
85 if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
86 /* Future: support kernel segments so that drivers
87 * can use SPUs.
89 pr_debug("invalid region access at %016lx\n", ea);
90 return 1;
93 esid = (ea & ESID_MASK) | SLB_ESID_V;
94 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
95 if (in_hugepage_area(mm->context, ea))
96 vsid |= SLB_VSID_L;
98 out_be64(&priv2->slb_index_W, spu->slb_replace);
99 out_be64(&priv2->slb_vsid_RW, vsid);
100 out_be64(&priv2->slb_esid_RW, esid);
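/* Replace SPU SLB entries round-robin; slb_replace wraps at eight entries. */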
102 spu->slb_replace++;
103 if (spu->slb_replace >= 8)
104 spu->slb_replace = 0;
106 spu_restart_dma(spu);
108 return 0;
111 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
112 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
114 pr_debug("%s\n", __FUNCTION__);
116 /* Handle kernel space hash faults immediately.
117 User hash faults need to be deferred to process context. */
118 if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
119 && REGION_ID(ea) != USER_REGION_ID
120 && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
121 spu_restart_dma(spu);
122 return 0;
125 if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
126 printk("%s: invalid access during switch!\n", __func__);
127 return 1;
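/* Latch the fault state into the spu structure for the deferred handler
 * (spu_irq_class_1_bottom) and notify the context via the stop callback;
 * user hash faults are resolved later in process context.
 */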
130 spu->dar = ea;
131 spu->dsisr = dsisr;
132 mb();
133 if (spu->stop_callback)
134 spu->stop_callback(spu);
135 return 0;
138 static int __spu_trap_mailbox(struct spu *spu)
140 if (spu->ibox_callback)
141 spu->ibox_callback(spu);
143 /* atomically disable SPU mailbox interrupts */
144 spin_lock(&spu->register_lock);
145 out_be64(&spu->priv1->int_mask_class2_RW,
146 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
147 spin_unlock(&spu->register_lock);
148 return 0;
151 static int __spu_trap_stop(struct spu *spu)
153 pr_debug("%s\n", __FUNCTION__);
154 spu->stop_code = in_be32(&spu->problem->spu_status_R);
155 if (spu->stop_callback)
156 spu->stop_callback(spu);
157 return 0;
160 static int __spu_trap_halt(struct spu *spu)
162 pr_debug("%s\n", __FUNCTION__);
163 spu->stop_code = in_be32(&spu->problem->spu_status_R);
164 if (spu->stop_callback)
165 spu->stop_callback(spu);
166 return 0;
169 static int __spu_trap_tag_group(struct spu *spu)
171 pr_debug("%s\n", __FUNCTION__);
172 /* wake_up(&spu->dma_wq); */
173 return 0;
176 static int __spu_trap_spubox(struct spu *spu)
178 if (spu->wbox_callback)
179 spu->wbox_callback(spu);
181 /* atomically disable SPU mailbox interrupts */
182 spin_lock(&spu->register_lock);
183 out_be64(&spu->priv1->int_mask_class2_RW,
184 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
185 spin_unlock(&spu->register_lock);
186 return 0;
189 static irqreturn_t
190 spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
192 struct spu *spu;
194 spu = data;
195 spu->class_0_pending = 1;
196 if (spu->stop_callback)
197 spu->stop_callback(spu);
199 return IRQ_HANDLED;
202 int
203 spu_irq_class_0_bottom(struct spu *spu)
205 unsigned long stat, mask;
207 spu->class_0_pending = 0;
209 mask = in_be64(&spu->priv1->int_mask_class0_RW);
210 stat = in_be64(&spu->priv1->int_stat_class0_RW);
212 stat &= mask;
214 if (stat & 1) /* invalid MFC DMA */
215 __spu_trap_invalid_dma(spu);
217 if (stat & 2) /* invalid DMA alignment */
218 __spu_trap_dma_align(spu);
220 if (stat & 4) /* error on SPU */
221 __spu_trap_error(spu);
223 out_be64(&spu->priv1->int_stat_class0_RW, stat);
225 return (stat & 0x7) ? -EIO : 0;
227 EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
229 static irqreturn_t
230 spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
232 struct spu *spu;
233 unsigned long stat, mask, dar, dsisr;
235 spu = data;
237 /* atomically read & clear class1 status. */
238 spin_lock(&spu->register_lock);
239 mask = in_be64(&spu->priv1->int_mask_class1_RW);
240 stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
241 dar = in_be64(&spu->priv1->mfc_dar_RW);
242 dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
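/* dar/dsisr were captured above; for a mapping fault (the CLASS1[Mf]
 * status bit) the hardware DSISR is cleared here, still under
 * register_lock, so a stale value is not reported with a later
 * class 1 exception.
 */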
243 if (stat & 2) /* mapping fault */
244 out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
245 out_be64(&spu->priv1->int_stat_class1_RW, stat);
246 spin_unlock(&spu->register_lock);
248 if (stat & 1) /* segment fault */
249 __spu_trap_data_seg(spu, dar);
251 if (stat & 2) { /* mapping fault */
252 __spu_trap_data_map(spu, dar, dsisr);
255 if (stat & 4) /* ls compare & suspend on get */
256 	;
258 if (stat & 8) /* ls compare & suspend on put */
259 	;
261 return stat ? IRQ_HANDLED : IRQ_NONE;
263 EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
265 static irqreturn_t
266 spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
268 struct spu *spu;
269 unsigned long stat;
270 unsigned long mask;
272 spu = data;
273 stat = in_be64(&spu->priv1->int_stat_class2_RW);
274 mask = in_be64(&spu->priv1->int_mask_class2_RW);
276 pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
278 stat &= mask;
280 if (stat & 1) /* PPC core mailbox */
281 __spu_trap_mailbox(spu);
283 if (stat & 2) /* SPU stop-and-signal */
284 __spu_trap_stop(spu);
286 if (stat & 4) /* SPU halted */
287 __spu_trap_halt(spu);
289 if (stat & 8) /* DMA tag group complete */
290 __spu_trap_tag_group(spu);
292 if (stat & 0x10) /* SPU mailbox threshold */
293 __spu_trap_spubox(spu);
295 out_be64(&spu->priv1->int_stat_class2_RW, stat);
296 return stat ? IRQ_HANDLED : IRQ_NONE;
299 static int
300 spu_request_irqs(struct spu *spu)
302 int ret;
303 int irq_base;
305 irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
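/* Each SPE owns one interrupt source (isrc) in each of the three
 * interrupt classes; the per-class lines sit IIC_CLASS_STRIDE apart
 * from the node's interrupt base.
 */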
307 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
308 ret = request_irq(irq_base + spu->isrc,
309 spu_irq_class_0, 0, spu->irq_c0, spu);
310 if (ret)
311 goto out;
312 out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
314 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
315 ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
316 spu_irq_class_1, 0, spu->irq_c1, spu);
317 if (ret)
318 goto out1;
319 out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
321 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
322 ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
323 spu_irq_class_2, 0, spu->irq_c2, spu);
324 if (ret)
325 goto out2;
326 out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
327 goto out;
329 out2:
330 free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
331 out1:
332 free_irq(irq_base + spu->isrc, spu);
333 out:
334 return ret;
337 static void
338 spu_free_irqs(struct spu *spu)
340 int irq_base;
342 irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
344 free_irq(irq_base + spu->isrc, spu);
345 free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
346 free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
349 static LIST_HEAD(spu_list);
350 static DECLARE_MUTEX(spu_mutex);
352 static void spu_init_channels(struct spu *spu)
354 static const struct {
355 unsigned channel;
356 unsigned count;
357 } zero_list[] = {
358 { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
359 { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
360 }, count_list[] = {
361 { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
362 { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
363 { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
365 struct spu_priv2 *priv2;
366 int i;
368 priv2 = spu->priv2;
370 /* initialize all channel data to zero */
371 for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
372 int count;
374 out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
375 for (count = 0; count < zero_list[i].count; count++)
376 out_be64(&priv2->spu_chnldata_RW, 0);
379 /* initialize channel counts to meaningful values */
380 for (i = 0; i < ARRAY_SIZE(count_list); i++) {
381 out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
382 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
386 static void spu_init_regs(struct spu *spu)
388 out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
389 out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
390 out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
393 struct spu *spu_alloc(void)
395 struct spu *spu;
397 down(&spu_mutex);
398 if (!list_empty(&spu_list)) {
399 spu = list_entry(spu_list.next, struct spu, list);
400 list_del_init(&spu->list);
401 pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
402 } else {
403 pr_debug("No SPU left\n");
404 spu = NULL;
406 up(&spu_mutex);
408 if (spu) {
409 spu_init_channels(spu);
410 spu_init_regs(spu);
413 return spu;
415 EXPORT_SYMBOL_GPL(spu_alloc);
417 void spu_free(struct spu *spu)
419 down(&spu_mutex);
420 list_add_tail(&spu->list, &spu_list);
421 up(&spu_mutex);
423 EXPORT_SYMBOL_GPL(spu_free);
425 static int spu_handle_mm_fault(struct spu *spu)
427 struct mm_struct *mm = spu->mm;
428 struct vm_area_struct *vma;
429 u64 ea, dsisr, is_write;
430 int ret;
432 ea = spu->dar;
433 dsisr = spu->dsisr;
434 #if 0
435 if (!IS_VALID_EA(ea)) {
436 return -EFAULT;
438 #endif /* XXX */
439 if (mm == NULL) {
440 return -EFAULT;
442 if (mm->pgd == NULL) {
443 return -EFAULT;
446 down_read(&mm->mmap_sem);
447 vma = find_vma(mm, ea);
448 if (!vma)
449 goto bad_area;
450 if (vma->vm_start <= ea)
451 goto good_area;
452 if (!(vma->vm_flags & VM_GROWSDOWN))
453 goto bad_area;
454 #if 0
455 if (expand_stack(vma, ea))
456 goto bad_area;
457 #endif /* XXX */
458 good_area:
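/* An MFC "put" writes into the mapped page, so it needs VM_WRITE;
 * other accesses only need read or execute permission.
 */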
459 is_write = dsisr & MFC_DSISR_ACCESS_PUT;
460 if (is_write) {
461 if (!(vma->vm_flags & VM_WRITE))
462 goto bad_area;
463 } else {
464 if (dsisr & MFC_DSISR_ACCESS_DENIED)
465 goto bad_area;
466 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
467 goto bad_area;
469 ret = 0;
470 switch (handle_mm_fault(mm, vma, ea, is_write)) {
471 case VM_FAULT_MINOR:
472 current->min_flt++;
473 break;
474 case VM_FAULT_MAJOR:
475 current->maj_flt++;
476 break;
477 case VM_FAULT_SIGBUS:
478 ret = -EFAULT;
479 goto bad_area;
480 case VM_FAULT_OOM:
481 ret = -ENOMEM;
482 goto bad_area;
483 default:
484 BUG();
486 up_read(&mm->mmap_sem);
487 return ret;
489 bad_area:
490 up_read(&mm->mmap_sem);
491 return -EFAULT;
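/*
 * Bottom half for class 1 (translation) exceptions, run in process
 * context: retry the hash table insert for PTE-not-found faults, fall
 * back to spu_handle_mm_fault() for access faults, then either restart
 * the suspended DMA or report it as an invalid DMA.
 */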
494 int spu_irq_class_1_bottom(struct spu *spu)
496 u64 ea, dsisr, access, error = 0UL;
497 int ret = 0;
499 ea = spu->dar;
500 dsisr = spu->dsisr;
501 if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
502 access = (_PAGE_PRESENT | _PAGE_USER);
503 access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
504 if (hash_page(ea, access, 0x300) != 0)
505 error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
507 if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
508 (dsisr & MFC_DSISR_ACCESS_DENIED)) {
509 if ((ret = spu_handle_mm_fault(spu)) != 0)
510 error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
511 else
512 error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
514 spu->dar = 0UL;
515 spu->dsisr = 0UL;
516 if (!error) {
517 spu_restart_dma(spu);
518 } else {
519 __spu_trap_invalid_dma(spu);
521 return ret;
524 static void __iomem * __init map_spe_prop(struct device_node *n,
525 const char *name)
527 struct address_prop {
528 unsigned long address;
529 unsigned int len;
530 } __attribute__((packed)) *prop;
532 void *p;
533 int proplen;
535 p = get_property(n, name, &proplen);
536 if (proplen != sizeof (struct address_prop))
537 return NULL;
539 prop = p;
541 return ioremap(prop->address, prop->len);
544 static void spu_unmap(struct spu *spu)
546 iounmap(spu->priv2);
547 iounmap(spu->priv1);
548 iounmap(spu->problem);
549 iounmap((u8 __iomem *)spu->local_store);
552 static int __init spu_map_device(struct spu *spu, struct device_node *spe)
554 char *prop;
555 int ret;
557 ret = -ENODEV;
558 prop = get_property(spe, "isrc", NULL);
559 if (!prop)
560 goto out;
561 spu->isrc = *(unsigned int *)prop;
563 spu->name = get_property(spe, "name", NULL);
564 if (!spu->name)
565 goto out;
567 prop = get_property(spe, "local-store", NULL);
568 if (!prop)
569 goto out;
570 spu->local_store_phys = *(unsigned long *)prop;
572 /* we use local store as ram, not io memory */
573 spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
574 if (!spu->local_store)
575 goto out;
577 spu->problem = map_spe_prop(spe, "problem");
578 if (!spu->problem)
579 goto out_unmap;
581 spu->priv1 = map_spe_prop(spe, "priv1");
582 if (!spu->priv1)
583 goto out_unmap;
585 spu->priv2 = map_spe_prop(spe, "priv2");
586 if (!spu->priv2)
587 goto out_unmap;
588 ret = 0;
589 goto out;
591 out_unmap:
592 spu_unmap(spu);
593 out:
594 return ret;
597 static int __init find_spu_node_id(struct device_node *spe)
599 unsigned int *id;
600 struct device_node *cpu;
602 cpu = spe->parent->parent;
603 id = (unsigned int *)get_property(cpu, "node-id", NULL);
605 return id ? *id : 0;
608 static int __init create_spu(struct device_node *spe)
610 struct spu *spu;
611 int ret;
612 static int number;
614 ret = -ENOMEM;
615 spu = kmalloc(sizeof (*spu), GFP_KERNEL);
616 if (!spu)
617 goto out;
619 ret = spu_map_device(spu, spe);
620 if (ret)
621 goto out_free;
623 spu->node = find_spu_node_id(spe);
624 spu->stop_code = 0;
625 spu->slb_replace = 0;
626 spu->mm = NULL;
627 spu->ctx = NULL;
628 spu->rq = NULL;
629 spu->pid = 0;
630 spu->class_0_pending = 0;
631 spu->flags = 0UL;
632 spu->dar = 0UL;
633 spu->dsisr = 0UL;
634 spin_lock_init(&spu->register_lock);
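/* Hand the MFC a copy of the hashed page table pointer (SDR1) and
 * initialise MFC state register 1.
 */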
636 out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
637 out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
639 spu->ibox_callback = NULL;
640 spu->wbox_callback = NULL;
641 spu->stop_callback = NULL;
643 down(&spu_mutex);
644 spu->number = number++;
645 ret = spu_request_irqs(spu);
646 if (ret)
647 goto out_unmap;
649 list_add(&spu->list, &spu_list);
650 up(&spu_mutex);
652 pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
653 spu->name, spu->isrc, spu->local_store,
654 spu->problem, spu->priv1, spu->priv2, spu->number);
655 goto out;
657 out_unmap:
658 up(&spu_mutex);
659 spu_unmap(spu);
660 out_free:
661 kfree(spu);
662 out:
663 return ret;
666 static void destroy_spu(struct spu *spu)
668 list_del_init(&spu->list);
670 spu_free_irqs(spu);
671 spu_unmap(spu);
672 kfree(spu);
675 static void cleanup_spu_base(void)
677 struct spu *spu, *tmp;
678 down(&spu_mutex);
679 list_for_each_entry_safe(spu, tmp, &spu_list, list)
680 destroy_spu(spu);
681 up(&spu_mutex);
683 module_exit(cleanup_spu_base);
685 static int __init init_spu_base(void)
687 struct device_node *node;
688 int ret;
690 ret = -ENODEV;
691 for (node = of_find_node_by_type(NULL, "spe");
692 node; node = of_find_node_by_type(node, "spe")) {
693 ret = create_spu(node);
694 if (ret) {
695 printk(KERN_WARNING "%s: Error initializing %s\n",
696 __FUNCTION__, node->name);
697 cleanup_spu_base();
698 break;
701 /* in some old firmware versions, the spe is called 'spc', so we
702 look for that as well */
703 for (node = of_find_node_by_type(NULL, "spc");
704 node; node = of_find_node_by_type(node, "spc")) {
705 ret = create_spu(node);
706 if (ret) {
707 printk(KERN_WARNING "%s: Error initializing %s\n",
708 __FUNCTION__, node->name);
709 cleanup_spu_base();
710 break;
713 return ret;
715 module_init(init_spu_base);
717 MODULE_LICENSE("GPL");
718 MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");