/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);
/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}
/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}
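
/*
 * Why the hack works: with (nearly) all bits set in cpu_vm_mask the mm
 * appears to be in use on multiple CPUs, so the hash MMU code flushes
 * its translations with a broadcast tlbie instead of the CPU-local
 * tlbiel; only the broadcast form also reaches the SPEs' MFC
 * translations. The NR_CPUS + 1 value is an attempt to keep the mask
 * looking "multi-CPU" even when NR_CPUS == 1, which is exactly the case
 * the comment above is worried about.
 */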
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}
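
/*
 * Note on the replacement policy above: the MFC's SLB has eight entries
 * (hence the wrap of slb_replace at 8), so faults are resolved by
 * overwriting entries round-robin. The vsid | llp value written to the
 * SPE mirrors what the kernel loads into its own SLB, so the SPE sees
 * the same page size encoding (mmu_psize_defs[psize].sllp) as the
 * PowerPC core does.
 */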
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);
	stat &= mask;

	spin_lock(&spu->register_lock);
	spu->class_0_pending |= stat;
	spin_unlock(&spu->register_lock);

	spu->stop_callback(spu);

	spu_int_stat_clear(spu, 0, stat);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long flags;
	unsigned long stat;

	spin_lock_irqsave(&spu->register_lock, flags);
	stat = spu->class_0_pending;
	spu->class_0_pending = 0;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
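
/*
 * Note on the top/bottom split: the hard irq handler above only
 * accumulates status bits in class_0_pending under register_lock and
 * signals the owner through stop_callback; the exported bottom half is
 * then run outside the hard irq (from the spufs run loop) to dispatch
 * the accumulated bits to the __spu_trap_* helpers, returning -EIO when
 * any of the three error conditions was pending.
 */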
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}
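
/*
 * Summary of the class 2 status bits, derived from the dispatch code
 * above:
 *
 *	bit 0 (0x01)  PPC core mailbox        -> ibox_callback (level triggered)
 *	bit 1 (0x02)  SPU stop-and-signal     -> stop_callback
 *	bit 2 (0x04)  SPU halted              -> stop_callback
 *	bit 3 (0x08)  DMA tag group complete  -> mfc_callback
 *	bit 4 (0x10)  SPU mailbox threshold   -> wbox_callback (level triggered)
 */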
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);
static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
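
/*
 * Illustrative sketch of how a hypothetical user of this interface
 * attaches an attribute to every spu, exactly as this file does for
 * its own "stat" attribute further below ("foo" and foo_show() are
 * made-up names):
 *
 *	static ssize_t foo_show(struct sys_device *sysdev, char *buf)
 *	{
 *		struct spu *spu = container_of(sysdev, struct spu, sysdev);
 *		return sprintf(buf, "%d\n", spu->number);
 *	}
 *	static SYSDEV_ATTR(foo, 0444, foo_show, NULL);
 *	...
 *	spu_add_sysdev_attr(&attr_foo);
 */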
int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
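
/*
 * Note on the unwind order above: each bail-out label releases exactly
 * what was set up before the failing step (irqs before the platform
 * spu, the platform spu before the allocation), so every later failure
 * can jump to the label matching its own progress. Once the spu has
 * been published on cbe_spu_info and spu_full_list there is no failure
 * path left; teardown then only happens through spu_shutdown().
 */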
static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};
static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
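
/*
 * Worked example (illustrative numbers): with stats.times[SPU_UTIL_USER]
 * at 4000000000 ns, util_state == SPU_UTIL_USER, and a tstamp taken
 * 500000000 ns ago, the function returns
 * (4000000000 + 500000000) / NSEC_PER_MSEC = 4500, i.e. 4.5 seconds of
 * user time reported in milliseconds.
 */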
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because of conflicting section types
		 * for const and __initdata with different compiler versions
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");