/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (attr == NULL)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};
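/*
 * Usage sketch for the macro above (illustrative only, not code that
 * is compiled in this file; foo_get and spufs_foo_ops are hypothetical
 * names):
 *
 *	static int foo_get(void *data, u64 *val)
 *	{
 *		*val = 42;
 *		return 0;
 *	}
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, foo_get, NULL, "0x%llx\n");
 *
 * This emits spufs_foo_ops_open() plus a spufs_foo_ops file_operations
 * wired to the spufs_attr_* helpers above; passing a NULL set routine
 * makes the resulting file read-only.
 */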
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	if (spu_acquire(ctx))
		return NOPFN_REFAULT;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;

	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		return NOPFN_REFAULT;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
	}

	spu_release(ctx);
	return NOPFN_REFAULT;
}
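/*
 * For reference, the problem-state windows mapped through this helper
 * by the callers below in this file (ps_offs/ps_size):
 * mss 0x0000/0x1000, mfc 0x3000/0x1000, cntl 0x4000/0x1000,
 * signal1 0x14000/0x1000 and signal2 0x1c000/0x1000 on 4k pages
 * (0x10000/0x10000 for both signals on 64k pages), and
 * psmap 0x0000/0x20000 for the whole problem state area.
 */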
#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return spufs_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spufs_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = spufs_attr_read,
	.write = spufs_attr_write,
	.mmap = spufs_cntl_mmap,
};
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				&lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}
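/*
 * A minimal user-space sketch of the read semantics above
 * (illustrative only; the context directory path is hypothetical):
 *
 *	u32 data[8];
 *	int fd = open("/spu/ctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, data, sizeof(data));
 *
 * On success n is a multiple of 4, one 32-bit word per mailbox entry
 * that was available; an empty mailbox fails with EAGAIN rather than
 * blocking, as this file implements no waiting read.
 */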
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
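/*
 * As an example of the wrapper above, the definition used further
 * down,
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE);
 *
 * generates a __spufs_npc_get() wrapper that brackets the raw
 * spufs_npc_get() call with spu_acquire()/spu_release() and then hands
 * that wrapper to DEFINE_SPUFS_SIMPLE_ATTRIBUTE.
 */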
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is a tag group that has completed */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
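/*
 * Example of a command that passes the checks above (a sketch; any
 * values meeting the constraints will do): lsa 16-byte aligned and
 * agreeing with ea in the low four bits, size a multiple of 16 and at
 * most 16k, tag below 16 (higher tags are reserved for the kernel)
 * and class left zero:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x100,
 *		.ea   = 0x10000000,
 *		.size = 0x4000,
 *		.tag  = 5,
 *		.cmd  = MFC_GET_CMD,
 *	};
 */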
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (!ret)
			ret = status;
	}
	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
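/*
 * Typical user-space flow against this file (an illustrative sketch):
 * queue a transfer by writing one struct mfc_dma_command, then wait
 * for its tag group by reading the 32-bit tag status back:
 *
 *	u32 tagstatus;
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	read(mfc_fd, &tagstatus, 4);
 *
 * With O_NONBLOCK both calls fail with EAGAIN instead of sleeping;
 * poll() as implemented below reports POLLOUT for free queue slots
 * and POLLIN for completed tag groups.
 */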
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
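/*
 * The stat file prints a single line in the order below; times are in
 * milliseconds as computed by spufs_acct_time() above:
 *
 *	<util_state> <user> <system> <iowait> <loaded>
 *	<vol_ctx_switch> <invol_ctx_switch> <slb_flts> <hash_flt>
 *	<min_flt> <maj_flt> <class2_intrs> <libassist>
 */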
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};
, 19 },