/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"
const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}
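
/*
 * Restart the MFC DMA queue after a fault has been resolved. The
 * restart is skipped while a context switch is pending on this SPU,
 * since the switch code manages the MFC state itself.
 */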
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
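
/*
 * Handle an SPE segment fault (SLB miss): build a valid SLB entry for
 * the faulting effective address and write it into one of the SPU's
 * SLB slots, cycling round-robin through the eight slots. Only
 * user-region addresses are handled here; kernel segments are not
 * supported yet.
 */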
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
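
/*
 * Handle an SPE data storage interrupt (hash fault). Kernel-region
 * faults are resolved inline via hash_page(); user faults are
 * recorded in the spu structure and deferred to process context
 * through the stop callback (see spu_irq_class_1_bottom below).
 */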
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
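
/*
 * Class 0 interrupts signal MFC errors: invalid DMA commands, bad DMA
 * alignment, and SPU errors. The hard irq handler only flags the
 * event; the real handling runs later in spu_irq_class_0_bottom().
 */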
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
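
/*
 * Class 1 interrupts cover translation faults raised by the MFC: SLB
 * segment misses and page table mapping faults.
 */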
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
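
/*
 * Class 2 interrupts cover SPU-side events: mailboxes, stop-and-signal,
 * halt, and DMA tag group completion. Each event is dispatched to the
 * callback installed by the current owner of the SPU.
 */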
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
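
/*
 * Each SPE exposes one interrupt source per class on the internal
 * interrupt controller; the three irq numbers are derived from the
 * node number and the per-SPE interrupt source number (isrc).
 */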
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}
static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}
static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);
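
/*
 * Bring the SPU channels into a known state: drain stale data from the
 * channels listed in zero_list, then program the channel counts listed
 * in count_list.
 */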
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
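
/*
 * Hand out an idle SPU from the global free list, reinitializing its
 * channels before use. Returns NULL when no SPU is available.
 */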
struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);
void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
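
/*
 * Resolve a deferred MFC fault in process context, broadly mirroring
 * the generic powerpc do_page_fault() path: look up the vma for the
 * faulting address, check access permissions against the DSISR bits,
 * and call handle_mm_fault() to fault the page in.
 */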
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;

	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}
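
/*
 * Bottom half for class 1 interrupts, run from the owning thread's
 * context: first retry the hardware hash table, then fall back to the
 * full mm fault path above. On success the suspended DMA queue is
 * restarted; on failure the fault is treated as an invalid DMA.
 */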
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}
static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = (void*)get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}
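
/*
 * Map one of the SPE's address ranges ("local-store", "problem",
 * "priv1", "priv2") as described by a device tree property. The range
 * is also registered with the memory hotplug code via
 * cell_spuprop_present(), since local store is used as RAM rather
 * than io memory.
 */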
static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;
	void *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}
static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(node, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};
static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);
extern int attach_sysdev_to_node(struct sys_device *dev, int nid);
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}
static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}
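
/*
 * Set up one SPU from its device tree node: map its register ranges,
 * request the three interrupt lines, register it with sysfs, and add
 * it to the global free list. Called once per node at boot.
 */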
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}
static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");