/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);

EXPORT_SYMBOL_GPL(spu_priv1_ops);
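
/*
 * Invalidate all SLB entries on an SPE.  The invalidate-all register
 * is only written while the MFC runs with address translation enabled,
 * hence the check of MFC_STATE1_RELOCATE_MASK below.
 */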
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_list_lock, flags);
}
/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
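
/*
 * Class 0 error events: each helper below simply forwards the event to
 * the context owner through spu->dma_callback.  All three are invoked
 * from spu_irq_class_0_bottom() further down.
 */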
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
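
/*
 * SLB miss on an MFC access: build and install a new SLB entry for the
 * faulting effective address.  Entries are replaced round-robin, with
 * spu->slb_replace cycling through the eight SLB slots.
 */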
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			psize = mmu_huge_psize;
		else
#endif
			psize = mm->context.user_psize;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
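
/*
 * Class 0 interrupt: SPE error conditions.  The hard-irq handler only
 * flags the event and stops the context; decoding and the callbacks
 * happen in spu_irq_class_0_bottom(), which is exported for the SPU
 * management code to call later.
 */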
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long flags;
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
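
/*
 * Class 1 interrupt: address translation faults raised by MFC
 * transfers (segment and mapping faults).  Status and fault registers
 * are read and cleared atomically under register_lock before the
 * faults are handled.
 */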
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
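
/*
 * Class 2 interrupt: application events such as mailboxes,
 * stop-and-signal, halt, and DMA tag-group completion.  The two
 * mailbox sources are level triggered and therefore masked before
 * being acknowledged.
 */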
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
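
/*
 * Bring the SPE channels to a known state before handing the SPU to a
 * new user: zero out the data of the channels in zero_list, then set
 * the channel counts in count_list to sane initial values.
 */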
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
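
/*
 * Take one free SPU off the given node's free list, or return NULL if
 * the node has none left.  The returned SPU has its channels
 * reinitialized before it is handed out.
 */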
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);
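
/*
 * Node-agnostic allocation: scan the nodes in order and return the
 * first free SPU found.  Callers that care about memory locality
 * should use spu_alloc_node() directly.
 */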
struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}
void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
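
/*
 * sysdev glue: every SPU is registered as a system device, and the
 * helpers below add or remove an attribute (or attribute group) on
 * all SPUs in the system at once.
 */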
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
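
/*
 * Called once per SPE found during enumeration: allocate and number
 * the spu structure, let the platform code set it up, hook up the
 * interrupts and the sysdev entry, then publish the SPU on the
 * per-node and global lists.
 */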
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	spin_lock_irqsave(&spu_list_lock, flags);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_list_lock, flags);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
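
/*
 * Module init: nothing to do unless the platform has registered a set
 * of spu_management_ops; otherwise register the sysdev class and let
 * the platform enumerate its SPEs through create_spu().
 */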
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);
	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		goto out_unregister_sysdev_class;
	}

	xmon_register_spus(&spu_full_list);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");