/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mutex.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"
const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);
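/*
 * Class 0 error conditions (invalid DMA, DMA alignment, SPU error) are
 * reported to the owning context through the dma_callback registered
 * by the SPU user (typically spufs).
 */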
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}
static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}
static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
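/*
 * Kick the MFC DMA queue again, unless a context switch is pending and
 * has deliberately suspended the queue.
 */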
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
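/*
 * SLB miss on the SPE side: build and install a new SLB entry for the
 * faulting effective address, reusing the eight SPU SLB slots in a
 * simple round-robin fashion (spu->slb_replace).
 */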
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			llp = mmu_psize_defs[mmu_huge_psize].sllp;
		else
#endif
			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER | llp;
		break;
	case VMALLOC_REGION_ID:
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	case KERNEL_REGION_ID:
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
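/*
 * Hash table miss on the SPE side. Kernel-space faults are hashed in
 * right away; user-space faults are deferred to process context via
 * the stop_callback and handled in spu_irq_class_1_bottom().
 */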
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
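/*
 * First-level interrupt handlers for the three SPU interrupt classes:
 * class 0 covers DMA and SPU errors, class 1 covers translation faults,
 * class 2 covers mailbox, stop-and-signal, halt and DMA tag-group events.
 */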
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
static struct list_head spu_list[MAX_NUMNODES];
static DEFINE_MUTEX(spu_mutex);
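/*
 * Idle SPUs are kept on per-node lists protected by spu_mutex; before
 * an SPU is handed out by spu_alloc_node(), its channels are reset to
 * a known state by spu_init_channels().
 */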
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
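/*
 * spu_alloc_node(), spu_alloc() and spu_free() form the allocation
 * interface used by higher layers (the spufs scheduler, for example)
 * to obtain a free SPU, optionally from a specific NUMA node.
 */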
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);
struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}
void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
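/*
 * Resolve a deferred SPE-side page fault against the mm of the owning
 * process, mirroring the checks done by the regular PowerPC fault path.
 */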
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;

	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}

	if (mm == NULL) {
		return -EFAULT;
	}

	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;

good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}
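/*
 * Bottom half for class 1 (translation) interrupts, run in process
 * context: try to hash the page in directly, fall back to a full mm
 * fault, then either restart the DMA or report an invalid DMA event.
 */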
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;
	unsigned long flags;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}

	return ret;
}
static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}
static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}
static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}
/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}
static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i = 0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret)
			goto err;
		ret = -EINVAL;
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ)
			goto err;
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}
static int spu_map_resource(struct device_node *node, int nr,
		void __iomem **virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	*virt = ioremap(resource.start, resource.end - resource.start);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem **)&spu->local_store,
					&spu->local_store_phys);
	if (ret)
		goto out;
	ret = spu_map_resource(node, 1, (void __iomem **)&spu->problem,
					&spu->problem_phys);
	if (ret)
		goto out_unmap;
	ret = spu_map_resource(node, 2, (void __iomem **)&spu->priv2,
					NULL);
	if (ret)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem **)&spu->priv1,
					NULL);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
};
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}
static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->node = find_spu_node_id(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		return -ENODEV;
	}
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);

	pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n",
		spu->name, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}
static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");