/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

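/*
 * Low-level event handlers, called from the class 0/1/2 interrupt
 * handlers below to deal with individual SPU events: DMA errors,
 * translation faults, mailboxes, stop-and-signal and halt.
 */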
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

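/*
 * Tell the MFC to resume processing DMA commands, unless a context
 * switch is currently pending for this SPU.
 */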
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

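/*
 * SLB miss on an MFC access: load the next SLB slot with a segment
 * entry for the faulting effective address, using round-robin
 * replacement across the eight slots, then restart the DMA.
 */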
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s\n", __FUNCTION__);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	/* wake_up(&spu->dma_wq); */
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

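/*
 * Top-half interrupt handlers for the three CBE interrupt classes:
 * class 0 signals error conditions, class 1 signals MFC translation
 * faults, class 2 signals mailbox, stop, halt and tag group events.
 */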
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1)  /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	spu_int_stat_clear(spu, 2, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
			 spu_irq_class_0, 0, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
			 spu_irq_class_1, 0, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
			 spu_irq_class_2, 0, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

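/*
 * Bring the SPU channels into a clean state: clear out stale channel
 * data and set the channel counts to sensible initial values.
 */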
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

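/* Take an idle SPU off the global list; returns NULL if none is available. */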
struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;

	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

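/*
 * Bottom half for class 1 interrupts, run in process context: retry the
 * hash fault and, if needed, resolve it through the mm, then either
 * restart the DMA or report an invalid access.
 */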
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

void spu_irq_setaffinity(struct spu *spu, int cpu)
{
	u64 target = iic_get_target_id(cpu);
	u64 route = target << 48 | target << 32 | target << 16;
	spu_int_route_set(spu, route);
}
EXPORT_SYMBOL_GPL(spu_irq_setaffinity);

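/*
 * Map an SPE resource described by an { address, len } property of the
 * given device node and return the ioremapped address.
 */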
static void __iomem * __init map_spe_prop(struct device_node *n,
					  const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;

	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

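/*
 * Set up one SPU from its device tree node: map its resources,
 * initialize the bookkeeping fields, request its interrupts and add
 * it to the global SPU list.
 */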
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;

	mutex_lock(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");