/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
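
/*
 * SPUFS_MMAP_4K is true only on kernels built with 4K base pages.  On
 * 64K-page kernels, the 4K-only problem-state mmap handlers below are
 * compiled out and replaced by NULL definitions in their #else branches.
 */
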
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	spu_acquire(ctx);
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}

static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

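/*
 * The nopfn handler above resolves local-store faults differently
 * depending on context state: while the context is saved, faults are
 * backed by the vmalloc'ed CSA copy (mapped cacheable); while it is
 * running on an SPU, they map the physical local store uncached.
 */
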
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

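/*
 * With CONFIG_SPU_FS_64K_LS, local store may be mapped with 64K pages;
 * slice_get_unmapped_area() is asked for a range inside a 64K-page
 * slice so that the alignment checks in spufs_mem_mmap() can hold.
 */
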
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};

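/*
 * A minimal user-space sketch of mapping local store through this file
 * (the context path is illustrative):
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *
 * MAP_SHARED is required; spufs_mem_mmap() rejects private mappings.
 */
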
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* error here usually means a signal.. we might want to test
	 * the error code more precisely though
	 */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}

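/*
 * spufs_ps_nopfn() backs all problem-state mappings: it refuses faults
 * past the mapped window, schedules the context onto an SPU via
 * spu_acquire_runnable(), and then inserts the PFN of the corresponding
 * physical problem-state register page.
 */
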
#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static u64 spufs_cntl_get(void *data)
{
	struct spu_context *ctx = data;
	u64 val;

	spu_acquire(ctx);
	val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_close(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};

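/*
 * The cntl file is a simple_attr: reads format the status register
 * through spufs_cntl_get() with "0x%08lx", and writes parse a value
 * and pass it to the run control register via spufs_cntl_set().
 */
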
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

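/*
 * A minimal user-space sketch (context path illustrative):
 *
 *	int fd = open("/spu/myctx/mbox", O_RDONLY);
 *	u32 data;
 *	ssize_t n = read(fd, &data, 4);
 *
 * Reads are in multiples of 4 bytes; an empty mailbox yields -EAGAIN,
 * as this file never blocks.
 */
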
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

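/*
 * Note that the nosched variant has no .read: reading a pending signal
 * goes through the saved context state, and a context created with
 * SPU_CREATE_NOSCHED is never scheduled out, so there is no saved
 * state to read from.
 */
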
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 __spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->ops->signal1_type_get(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal1_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
					spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 __spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->ops->signal2_type_get(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal2_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
					spufs_signal2_type_set, "%llu");

#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};

#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

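/*
 * ctx->tagwait tracks the tag groups the caller is still waiting on;
 * completed groups are cleared from it here and reported to the caller
 * through spufs_mfc_read() below.
 */
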
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

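/*
 * A minimal user-space DMA sketch (context path illustrative): queue a
 * transfer by writing one mfc_dma_command, then reap its tag group by
 * reading the 4-byte tag status.  ea and lsa must be equally aligned.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa = 0x0, .ea = (uint64_t)buf,
 *		.size = 16384, .tag = 1, .cmd = MFC_GET_CMD,
 *	};
 *	int fd = open("/spu/myctx/mfc", O_RDWR);
 *	write(fd, &cmd, sizeof cmd);
 *	u32 status;
 *	read(fd, &status, 4);	// blocks until tag group 1 completes
 */
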
static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

static u64 spufs_npc_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire(ctx);
	ret = ctx->ops->npc_read(ctx);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
			"0x%llx\n")

static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 __spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}

static u64 spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_decr_get(data);
	spu_release_saved(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
			"0x%llx\n")

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire_saved(ctx);
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);
}

static u64 __spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}

static u64 spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_decr_status_get(data);
	spu_release_saved(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
			spufs_decr_status_set, "0x%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 __spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

static u64 spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_event_mask_get(data);
	spu_release_saved(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
			spufs_event_mask_set, "0x%llx\n")

static u64 __spufs_event_status_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}

static u64 spufs_event_status_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire_saved(ctx);
	ret = __spufs_event_status_get(data);
	spu_release_saved(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
			NULL, "0x%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_srr0_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->srr0.slot[0];
	spu_release_saved(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
			"0x%llx\n")

static u64 spufs_id_get(void *data)
{
	struct spu_context *ctx = data;
	u64 num;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;
	spu_release(ctx);

	return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

static u64 __spufs_object_id_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->object_id;
}

static u64 spufs_object_id_get(void *data)
{
	/* FIXME: Should there really be no locking here? */
	return __spufs_object_id_get(data);
}

static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		spufs_object_id_set, "0x%llx\n");

static u64 __spufs_lslr_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->csa.priv2.spu_lslr_RW;
}

static u64 spufs_lslr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire_saved(ctx);
	ret = __spufs_lslr_get(data);
	spu_release_saved(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

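/*
 * The capabilities file advertises what the context supports: "sched"
 * is absent for SPU_CREATE_NOSCHED contexts, and "step" is absent for
 * isolated (SPU_CREATE_ISOLATE) contexts.
 */
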
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 mbox_stat;
	u32 data;

	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

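/*
 * All of the *_info files follow this pattern: spu_acquire_saved()
 * forces the context into saved state so that the snapshot in ctx->csa
 * is current, and csa.register_lock serializes concurrent readers.
 */
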
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 ibox_stat;
	u32 data;

	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);

	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

[] = {
2209 { "capabilities", &spufs_caps_fops
, 0444, },
2210 { "mem", &spufs_mem_fops
, 0666, },
2211 { "mbox", &spufs_mbox_fops
, 0444, },
2212 { "ibox", &spufs_ibox_fops
, 0444, },
2213 { "wbox", &spufs_wbox_fops
, 0222, },
2214 { "mbox_stat", &spufs_mbox_stat_fops
, 0444, },
2215 { "ibox_stat", &spufs_ibox_stat_fops
, 0444, },
2216 { "wbox_stat", &spufs_wbox_stat_fops
, 0444, },
2217 { "signal1", &spufs_signal1_nosched_fops
, 0222, },
2218 { "signal2", &spufs_signal2_nosched_fops
, 0222, },
2219 { "signal1_type", &spufs_signal1_type
, 0666, },
2220 { "signal2_type", &spufs_signal2_type
, 0666, },
2221 { "mss", &spufs_mss_fops
, 0666, },
2222 { "mfc", &spufs_mfc_fops
, 0666, },
2223 { "cntl", &spufs_cntl_fops
, 0666, },
2224 { "npc", &spufs_npc_ops
, 0666, },
2225 { "psmap", &spufs_psmap_fops
, 0666, },
2226 { "phys-id", &spufs_id_ops
, 0666, },
2227 { "object-id", &spufs_object_id_ops
, 0666, },
2228 { "tid", &spufs_tid_fops
, 0444, },
2229 { "stat", &spufs_stat_fops
, 0444, },
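/*
 * The nosched variant omits the files (regs, fpcr, lslr, srr0, decr,
 * decr_status, event_mask, event_status, and the *_info files) whose
 * implementation reads or writes saved context state, which an
 * SPU_CREATE_NOSCHED context never has.
 */
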
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, __spufs_lslr_get, 19 },
	{ "decr", NULL, __spufs_decr_get, 19 },
	{ "decr_status", NULL, __spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, __spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, __spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, __spufs_event_mask_get, 19 },
	{ "event_status", NULL, __spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, __spufs_object_id_get, 19 },
	{ NULL },
};

int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
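
/*
 * Each entry above is emitted as one note in an SPU core dump; the
 * terminating { NULL } entry is excluded from the count, hence the -1.
 */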