/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/semaphore.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->local_store = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->local_store = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        char *local_store = ctx->ops->get_ls(ctx);
        return simple_read_from_buffer(buffer, size, pos, local_store,
                                        LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;

        ret = __spufs_mem_read(ctx, buffer, size, pos);

        return ret;
}
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
                        size_t size, loff_t *ppos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        loff_t pos = *ppos;
        int ret;

        if (size > LS_SIZE - pos)
                size = LS_SIZE - pos;

        local_store = ctx->ops->get_ls(ctx);
        ret = copy_from_user(local_store + pos, buffer, size);

        if (ret)
                return -EFAULT;

        *ppos = pos + size;
        return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
                                          unsigned long address)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long pfn, offset = address - vma->vm_start;

        offset += vma->vm_pgoff << PAGE_SHIFT;

        if (offset >= LS_SIZE)
                return NOPFN_SIGBUS;

        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                             & ~_PAGE_NO_CACHE);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                             | _PAGE_NO_CACHE | _PAGE_GUARDED);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
        vm_insert_pfn(vma, address, pfn);

        return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
        .nopfn = spufs_mem_mmap_nopfn,
};

static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
}

static const struct file_operations spufs_mem_fops = {
        .open    = spufs_mem_open,
        .release = spufs_mem_release,
        .read    = spufs_mem_read,
        .write   = spufs_mem_write,
        .llseek  = generic_file_llseek,
        .mmap    = spufs_mem_mmap,
};
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
                                    unsigned long address,
                                    unsigned long ps_offs,
                                    unsigned long ps_size)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long area, offset = address - vma->vm_start;
        int ret;

        offset += vma->vm_pgoff << PAGE_SHIFT;
        if (offset >= ps_size)
                return NOPFN_SIGBUS;

        /* error here usually means a signal.. we might want to test
         * the error code more precisely though
         */
        ret = spu_acquire_runnable(ctx, 0);
        if (ret)
                return NOPFN_REFAULT;

        area = ctx->spu->problem_phys + ps_offs;
        vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
        spu_release(ctx);

        return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
                                           unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
        .nopfn = spufs_cntl_mmap_nopfn,
};
/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static u64 spufs_cntl_get(void *data)
{
        struct spu_context *ctx = data;
        u64 val;

        val = ctx->ops->status_read(ctx);

        return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        ctx->ops->runcntl_write(ctx, val);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->cntl = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);

        return simple_attr_open(inode, file, spufs_cntl_get,
                                spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        simple_attr_close(inode, file);

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->cntl = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static const struct file_operations spufs_cntl_fops = {
        .open    = spufs_cntl_open,
        .release = spufs_cntl_release,
        .read    = simple_attr_read,
        .write   = simple_attr_write,
        .mmap    = spufs_cntl_mmap,
};
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;
        return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return simple_read_from_buffer(buffer, size, pos,
                                       lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        spu_acquire_saved(ctx);
        ret = __spufs_regs_read(ctx, buffer, size, pos);
        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire_saved(ctx);

        ret = copy_from_user(lscsa->gprs + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static const struct file_operations spufs_regs_fops = {
        .open    = spufs_regs_open,
        .read    = spufs_regs_read,
        .write   = spufs_regs_write,
        .llseek  = generic_file_llseek,
};
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
                        size_t size, loff_t * pos)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return simple_read_from_buffer(buffer, size, pos,
                                       &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
                size_t size, loff_t * pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        spu_acquire_saved(ctx);
        ret = __spufs_fpcr_read(ctx, buffer, size, pos);
        spu_release(ctx);
        return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
                size_t size, loff_t * pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        spu_acquire_saved(ctx);

        ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release(ctx);
        return ret;
}

static const struct file_operations spufs_fpcr_fops = {
        .open   = spufs_regs_open,
        .read   = spufs_fpcr_read,
        .write  = spufs_fpcr_write,
        .llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;

        return nonseekable_open(inode, file);
}
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        udata = (void __user *)buf;

        for (count = 0; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = ctx->ops->mbox_read(ctx, &mbox_data);
                if (ret == 0)
                        break;

                /*
                 * at the end of the mapped area, we can fault
                 * but still need to return the data we have
                 * read successfully so far.
                 */
                ret = __put_user(mbox_data, udata);
                if (ret) {
                        if (!count)
                                count = -EFAULT;
                        break;
                }
        }

        if (!count)
                count = -EAGAIN;

        return count;
}

static const struct file_operations spufs_mbox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_stat;

        if (len < 4)
                return -EINVAL;

        mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

        if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
                return -EFAULT;

        return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_stat_read,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
        return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->ibox_wq);
        kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        udata = (void __user *)buf;

        /* wait only for the first element */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_ibox_read(ctx, &ibox_data))
                        count = -EAGAIN;
        } else {
                count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
        }
        if (count)
                goto out;

        /* if we can't write at all, return -EFAULT */
        count = __put_user(ibox_data, udata);
        if (count)
                goto out;

        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = ctx->ops->ibox_read(ctx, &ibox_data);
                if (ret == 0)
                        break;
                /*
                 * at the end of the mapped area, we can fault
                 * but still need to return the data we have
                 * read successfully so far.
                 */
                ret = __put_user(ibox_data, udata);
                if (ret)
                        break;
        }

out:
        return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->ibox_wq, wait);

        mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);

        return mask;
}

static const struct file_operations spufs_ibox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_read,
        .poll   = spufs_ibox_poll,
        .fasync = spufs_ibox_fasync,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_stat;

        if (len < 4)
                return -EINVAL;

        ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;

        if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
                return -EFAULT;

        return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_stat_read,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
        return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

        return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->wbox_wq);
        kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        udata = (void __user *)buf;
        if (!access_ok(VERIFY_READ, buf, len))
                return -EFAULT;

        if (__get_user(wbox_data, udata))
                return -EFAULT;

        /*
         * make sure we can at least write one element, by waiting
         * in case of !O_NONBLOCK
         */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_wbox_write(ctx, wbox_data))
                        count = -EAGAIN;
        } else {
                count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
        }
        if (count)
                goto out;

        /* write as much as possible */
        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = __get_user(wbox_data, udata);
                if (ret)
                        break;

                ret = spu_wbox_write(ctx, wbox_data);
                if (ret == 0)
                        break;
        }

out:
        return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->wbox_wq, wait);

        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);

        return mask;
}

static const struct file_operations spufs_wbox_fops = {
        .open   = spufs_pipe_open,
        .write  = spufs_wbox_write,
        .poll   = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_stat;

        if (len < 4)
                return -EINVAL;

        wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;

        if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
                return -EFAULT;

        return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_wbox_stat_read,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal1 = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);

        return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal1 = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret = 0;
        u32 data;

        if (len < 4)
                return -EINVAL;

        if (ctx->csa.spu_chnlcnt_RW[3]) {
                data = ctx->csa.spu_chnldata_RW[3];
                ret = 4;
        }

        if (ret && copy_to_user(buf, &data, 4))
                return -EFAULT;

        return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        spu_acquire_saved(ctx);
        ret = __spufs_signal1_read(ctx, buf, len, pos);
        spu_release(ctx);

        return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        ctx->ops->signal1_write(ctx, data);

        return 4;
}
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
                                              unsigned long address)
{
#if PAGE_SIZE == 0x1000
        return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
}

static const struct file_operations spufs_signal1_fops = {
        .open    = spufs_signal1_open,
        .release = spufs_signal1_release,
        .read    = spufs_signal1_read,
        .write   = spufs_signal1_write,
        .mmap    = spufs_signal1_mmap,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal2 = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);

        return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal2 = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret = 0;
        u32 data;

        if (len < 4)
                return -EINVAL;

        if (ctx->csa.spu_chnlcnt_RW[4]) {
                data = ctx->csa.spu_chnldata_RW[4];
                ret = 4;
        }

        if (ret && copy_to_user(buf, &data, 4))
                return -EFAULT;

        return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        spu_acquire_saved(ctx);
        ret = __spufs_signal2_read(ctx, buf, len, pos);
        spu_release(ctx);

        return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        ctx->ops->signal2_write(ctx, data);

        return 4;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
                                              unsigned long address)
{
#if PAGE_SIZE == 0x1000
        return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
        .open    = spufs_signal2_open,
        .release = spufs_signal2_release,
        .read    = spufs_signal2_read,
        .write   = spufs_signal2_write,
        .mmap    = spufs_signal2_mmap,
};
static void spufs_signal1_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        ctx->ops->signal1_type_set(ctx, val);
}

static u64 __spufs_signal1_type_get(void *data)
{
        struct spu_context *ctx = data;
        return ctx->ops->signal1_type_get(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
        u64 ret;

        ret = __spufs_signal1_type_get(data);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
                        spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        ctx->ops->signal2_type_set(ctx, val);
}

static u64 __spufs_signal2_type_get(void *data)
{
        struct spu_context *ctx = data;
        return ctx->ops->signal2_type_get(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
        u64 ret;

        ret = __spufs_signal2_type_get(data);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
                        spufs_signal2_type_set, "%llu");
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
                                          unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
        .nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        file->private_data = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!i->i_openers++)
                ctx->mss = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);

        return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mss = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static const struct file_operations spufs_mss_fops = {
        .open    = spufs_mss_open,
        .release = spufs_mss_release,
        .mmap    = spufs_mss_mmap,
};
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
                                            unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
        .nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_psmap_mmap_vmops;
        return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        file->private_data = i->i_ctx;
        if (!i->i_openers++)
                ctx->psmap = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);

        return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->psmap = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}

static const struct file_operations spufs_psmap_fops = {
        .open    = spufs_psmap_open,
        .release = spufs_psmap_release,
        .mmap    = spufs_psmap_mmap,
};
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
                                          unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        /* we don't want to deal with DMA into other processes */
        if (ctx->owner != current->mm)
                return -EINVAL;

        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;

        spin_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->mfc = inode->i_mapping;
        spin_unlock(&ctx->mapping_lock);

        return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        spin_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mfc = NULL;
        spin_unlock(&ctx->mapping_lock);
        return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->mfc_wq);

        pr_debug("%s %s\n", __FUNCTION__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;

                /* no need for spu_acquire in interrupt context */
                free_elements = ctx->ops->get_mfc_free_elements(ctx);
                tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

                mask = 0;
                if (free_elements & 0xffff)
                        mask |= POLLOUT;
                if (tagstatus & ctx->tagwait)
                        mask |= POLLIN;

                kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
        }
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
        /* See if there is a tag group that has completed */
        /* FIXME we need locking around tagwait */
        *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
        ctx->tagwait &= ~*status;
        if (*status)
                return 1;

        /* enable interrupt waiting for any tag group,
           may silently fail if interrupts are already enabled */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
        return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 status;
        int ret;

        if (file->f_flags & O_NONBLOCK) {
                status = ctx->ops->read_mfc_tagstatus(ctx);
                if (!(status & ctx->tagwait))
                        return -EAGAIN;
                ctx->tagwait &= ~status;
        } else {
                ret = spufs_wait(ctx->mfc_wq,
                           spufs_read_mfc_tagstatus(ctx, &status));
                if (ret)
                        return ret;
        }

        if (copy_to_user(buffer, &status, 4))
                return -EFAULT;

        return 4;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
        pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

        switch (cmd->cmd) {
        /* ... accepted MFC DMA opcodes elided ... */
        default:
                pr_debug("invalid DMA opcode %x\n", cmd->cmd);
                return -EIO;
        }

        if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
                pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
                                cmd->ea, cmd->lsa);
                return -EIO;
        }

        switch (cmd->size & 0xf) {
        /* ... per-size alignment checks elided ... */
        default:
                pr_debug("invalid DMA alignment %x for size %x\n",
                        cmd->lsa & 0xf, cmd->size);
                return -EIO;
        }

        if (cmd->size > 16 * 1024) {
                pr_debug("invalid DMA size %x\n", cmd->size);
                return -EIO;
        }

        if (cmd->tag & 0xfff0) {
                /* we reserve the higher tag numbers for kernel use */
                pr_debug("invalid DMA tag\n");
                return -EIO;
        }

        if (cmd->class) {
                /* not supported in this version */
                pr_debug("invalid DMA class\n");
                return -EIO;
        }

        return 0;
}
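/*
 * Editor's note (illustrative only): a command that passes the checks
 * above must keep lsa and ea congruent in their low nibble, stay at or
 * below 16 KB, and use a user tag in the range 0..15. A hypothetical
 * caller, using a GET-type opcode such as MFC_GET_CMD from the spu
 * headers, might build:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,		// local store address
 *		.ea   = buffer_ea,	// effective address, same low nibble as lsa
 *		.size = 0x4000,		// 16 KB, the per-command maximum
 *		.tag  = 1,		// tags 0..15 are available to users
 *		.cmd  = MFC_GET_CMD,	// must be one of the accepted opcodes
 *	};
 *	write(mfc_fd, &cmd, sizeof cmd);
 */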
static int spu_send_mfc_command(struct spu_context *ctx,
                                struct mfc_dma_command cmd,
                                int *error)
{
        *error = ctx->ops->send_mfc_command(ctx, &cmd);
        if (*error == -EAGAIN) {
                /* wait for any tag group to complete
                   so we have space for the new command */
                ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
                /* try again, because the queue might be
                   empty again */
                *error = ctx->ops->send_mfc_command(ctx, &cmd);
                if (*error == -EAGAIN)
                        return 0;
        }
        return 1;
}
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct mfc_dma_command cmd;
        int ret = -EINVAL;

        if (size != sizeof cmd)
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&cmd, buffer, sizeof cmd))
                goto out;

        ret = spufs_check_valid_dma(&cmd);
        if (ret)
                goto out;

        spu_acquire_runnable(ctx, 0);
        if (file->f_flags & O_NONBLOCK) {
                ret = ctx->ops->send_mfc_command(ctx, &cmd);
        } else {
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
                if (!ret)
                        ret = status;
        }
        if (ret)
                goto out_unlock;

        ctx->tagwait |= 1 << cmd.tag;
        ret = size;

out_unlock:
        spu_release(ctx);
out:
        return ret;
}
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        u32 free_elements, tagstatus;
        unsigned int mask;

        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
        free_elements = ctx->ops->get_mfc_free_elements(ctx);
        tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

        poll_wait(file, &ctx->mfc_wq, wait);

        mask = 0;
        if (free_elements & 0xffff)
                mask |= POLLOUT | POLLWRNORM;
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;

        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
                free_elements, tagstatus, ctx->tagwait);

        return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        /* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
        if (ret)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
        return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
                           int datasync)
{
        return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
        .open    = spufs_mfc_open,
        .release = spufs_mfc_release,
        .read    = spufs_mfc_read,
        .write   = spufs_mfc_write,
        .poll    = spufs_mfc_poll,
        .flush   = spufs_mfc_flush,
        .fsync   = spufs_mfc_fsync,
        .fasync  = spufs_mfc_fasync,
        .mmap    = spufs_mfc_mmap,
};
static void spufs_npc_set(void *data, u64 val)
{
        struct spu_context *ctx = data;

        ctx->ops->npc_write(ctx, val);
}

static u64 spufs_npc_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        ret = ctx->ops->npc_read(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
                        "0x%llx\n")

static void spufs_decr_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->decr.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 __spufs_decr_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->decr.slot[0];
}

static u64 spufs_decr_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = __spufs_decr_get(data);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
                        "0x%llx\n")
static void spufs_decr_status_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->decr_status.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 __spufs_decr_status_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->decr_status.slot[0];
}

static u64 spufs_decr_status_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = __spufs_decr_status_get(data);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
                        spufs_decr_status_set, "0x%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->event_mask.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 __spufs_event_mask_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->event_mask.slot[0];
}

static u64 spufs_event_mask_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = __spufs_event_mask_get(data);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
                        spufs_event_mask_set, "0x%llx\n")
static u64 __spufs_event_status_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_state *state = &ctx->csa;
        u64 stat;

        stat = state->spu_chnlcnt_RW[0];
        if (stat)
                return state->spu_chnldata_RW[0];
        return 0;
}

static u64 spufs_event_status_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = __spufs_event_status_get(data);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
                        NULL, "0x%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
        lscsa->srr0.slot[0] = (u32) val;
        spu_release(ctx);
}

static u64 spufs_srr0_get(void *data)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = lscsa->srr0.slot[0];
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
                        "0x%llx\n")
static u64 spufs_id_get(void *data)
{
        struct spu_context *ctx = data;
        u64 num;

        if (ctx->state == SPU_STATE_RUNNABLE)
                num = ctx->spu->number;
        else
                num = (unsigned int)-1;

        return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

static u64 __spufs_object_id_get(void *data)
{
        struct spu_context *ctx = data;
        return ctx->object_id;
}

static u64 spufs_object_id_get(void *data)
{
        /* FIXME: Should there really be no locking here? */
        return __spufs_object_id_get(data);
}

static void spufs_object_id_set(void *data, u64 id)
{
        struct spu_context *ctx = data;
        ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
                spufs_object_id_set, "0x%llx\n");

static u64 __spufs_lslr_get(void *data)
{
        struct spu_context *ctx = data;
        return ctx->csa.priv2.spu_lslr_RW;
}

static u64 spufs_lslr_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = __spufs_lslr_get(data);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
static int spufs_info_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        return 0;
}

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 mbox_stat;
        u32 data;

        mbox_stat = ctx->csa.prob.mb_stat_R;
        if (mbox_stat & 0x0000ff) {
                data = ctx->csa.prob.pu_mb_R;
        }

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_mbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_mbox_info_read,
        .llseek = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 ibox_stat;
        u32 data;

        ibox_stat = ctx->csa.prob.mb_stat_R;
        if (ibox_stat & 0xff0000) {
                data = ctx->csa.priv2.puint_mb_R;
        }

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_ibox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_ibox_info_read,
        .llseek = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        int i, cnt;
        u32 data[4];
        u32 wbox_stat;

        wbox_stat = ctx->csa.prob.mb_stat_R;
        cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
        for (i = 0; i < cnt; i++) {
                data[i] = ctx->csa.spu_mailbox_data[i];
        }

        return simple_read_from_buffer(buf, len, pos, &data,
                                cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_wbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_wbox_info_read,
        .llseek = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_dma_info info;
        struct mfc_cq_sr *qp, *spuqp;
        int i;

        info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
        info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
        info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
        info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
        info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
        for (i = 0; i < 16; i++) {
                qp = &info.dma_info_command_data[i];
                spuqp = &ctx->csa.priv2.spuq[i];

                qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
                              size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_dma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_dma_info_fops = {
        .open = spufs_info_open,
        .read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_proxydma_info info;
        struct mfc_cq_sr *qp, *puqp;
        int ret = sizeof info;
        int i;

        if (len < ret)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
        info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
        info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
        for (i = 0; i < 8; i++) {
                qp = &info.proxydma_info_command_data[i];
                puqp = &ctx->csa.priv2.puq[i];

                qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
        .open = spufs_info_open,
        .read = spufs_proxydma_info_read,
};
struct tree_descr spufs_dir_contents[] = {
        { "mem",  &spufs_mem_fops,  0666, },
        { "regs", &spufs_regs_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "mbox_info", &spufs_mbox_info_fops, 0444, },
        { "ibox_info", &spufs_ibox_info_fops, 0444, },
        { "wbox_info", &spufs_wbox_info_fops, 0444, },
        { "dma_info", &spufs_dma_info_fops, 0444, },
        { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
        {},
};

struct tree_descr spufs_dir_nosched_contents[] = {
        { "mem",  &spufs_mem_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        {},
};

struct spufs_coredump_reader spufs_coredump_read[] = {
        { "regs", __spufs_regs_read, NULL, 128 * 16 },
        { "fpcr", __spufs_fpcr_read, NULL, 16 },
        { "lslr", NULL, __spufs_lslr_get, 11 },
        { "decr", NULL, __spufs_decr_get, 11 },
        { "decr_status", NULL, __spufs_decr_status_get, 11 },
        { "mem", __spufs_mem_read, NULL, 256 * 1024, },
        { "signal1", __spufs_signal1_read, NULL, 4 },
        { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
        { "signal2", __spufs_signal2_read, NULL, 4 },
        { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
        { "event_mask", NULL, __spufs_event_mask_get, 8 },
        { "event_status", NULL, __spufs_event_status_get, 8 },
        { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
        { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
        { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
        { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
        { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
        { "object-id", NULL, __spufs_object_id_get, 19 },
        { NULL },
};

int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;