/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	spu_acquire(ctx);
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
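/*
 * Usage sketch (illustrative, not part of this file): from user space,
 * "mem" behaves like a 256KB file backed by the SPU local store.  The
 * context path below is hypothetical and assumes a context directory
 * created with spu_create(2):
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	char buf[16];
 *	pread(fd, buf, sizeof(buf), 0);		// read start of local store
 *	pwrite(fd, buf, sizeof(buf), 0x100);	// patch it at offset 0x100
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);	// or map it directly
 */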
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * changed.
	 */
	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		down_read(&current->mm->mmap_sem);
		goto refault;
	}

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

refault:
	spu_release(ctx);
	return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static u64 spufs_cntl_get(void *data)
{
	struct spu_context *ctx = data;
	u64 val;

	spu_acquire(ctx);
	val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_close(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
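/*
 * Usage sketch (illustrative): "cntl" is a simple_attr file, so a read
 * returns the SPU status register formatted with "0x%08lx", and a
 * number written to it is passed through to runcntl_write().  The path
 * is hypothetical:
 *
 *	char buf[32];
 *	int fd = open("/spu/myctx/cntl", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000000"
 *	write(fd, "1", 1);		// runcntl value 1 starts the SPU
 */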
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
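/*
 * Usage sketch (illustrative): "mbox" never blocks; a read drains as
 * many pending 4-byte entries as fit in the buffer and fails with
 * -EAGAIN when the mailbox is empty.  Hypothetical path:
 *
 *	uint32_t data;
 *	int fd = open("/spu/myctx/mbox", O_RDONLY);
 *	ssize_t n = read(fd, &data, 4);
 *	if (n == 4)
 *		printf("mailbox word %#x\n", data);
 *	else if (n < 0 && errno == EAGAIN)
 *		;	// nothing queued yet
 */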
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
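/*
 * Usage sketch (illustrative): unlike "mbox", the "ibox" file supports
 * blocking reads, poll(2), and SIGIO delivery.  Hypothetical path:
 *
 *	struct pollfd pfd = {
 *		.fd = open("/spu/myctx/ibox", O_RDONLY | O_NONBLOCK),
 *		.events = POLLIN,
 *	};
 *	uint32_t data;
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		read(pfd.fd, &data, 4);	// normally succeeds right after poll
 */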
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
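/*
 * Usage sketch (illustrative): a write to "wbox" queues 4-byte values
 * for the SPU to read from its inbound mailbox; without O_NONBLOCK the
 * first word blocks until the mailbox has space.  Hypothetical path:
 *
 *	uint32_t words[2] = { 1, 2 };
 *	int fd = open("/spu/myctx/wbox", O_WRONLY);
 *	ssize_t n = write(fd, words, sizeof(words));
 *	// n counts the bytes actually queued, always a multiple of 4
 */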
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
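/*
 * Usage sketch (illustrative): writing four bytes to "signal1" raises
 * a signal notification for the SPU program; on scheduled contexts the
 * pending value can also be read back.  Hypothetical path:
 *
 *	uint32_t cookie = 0x1;
 *	int fd = open("/spu/myctx/signal1", O_WRONLY);
 *	write(fd, &cookie, 4);
 */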
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static u64 __##__get(void *data)					\
{									\
	struct spu_context *ctx = data;					\
	u64 ret;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		spu_acquire(ctx);					\
		ret = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		spu_acquire_saved(ctx);					\
		ret = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		ret = __get(ctx);					\
									\
	return ret;							\
}									\
DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
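/*
 * For example, the spufs_npc_ops attribute defined further down
 * expands (conceptually) to:
 *
 *	static u64 __spufs_npc_get(void *data)
 *	{
 *		struct spu_context *ctx = data;
 *		u64 ret;
 *
 *		spu_acquire(ctx);
 *		ret = spufs_npc_get(ctx);
 *		spu_release(ctx);
 *		return ret;
 *	}
 *	DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, __spufs_npc_get,
 *				spufs_npc_set, "0x%llx\n");
 */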
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};
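/*
 * Usage sketch (illustrative): mapping "psmap" gives direct, uncached
 * access to the whole 128KB problem state area, which is how libspe
 * issues mailbox and DMA operations without extra syscalls.  The path
 * and the register offset (SPU_Mbox_Stat, per the CBE architecture
 * documents) are given only as an example:
 *
 *	int fd = open("/spu/myctx/psmap", O_RDWR);
 *	volatile uint8_t *ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
 *				    MAP_SHARED, fd, 0);
 *	volatile uint32_t *mb_stat = (volatile uint32_t *)(ps + 0x4014);
 */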
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	spu_acquire(ctx);
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
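/*
 * Usage sketch (illustrative): user space queues a DMA by writing one
 * struct mfc_dma_command and reaps completion by reading back the tag
 * group status.  All field values below are hypothetical:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,		// local store address
 *		.ea   = (uint64_t)buf,	// effective address, same alignment
 *		.size = 0x4000,		// 16KB, the maximum allowed here
 *		.tag  = 5,		// tag group, must be < 16
 *		.cmd  = MFC_GET_CMD,	// main storage -> local store
 *	};
 *	uint32_t status;
 *	int fd = open("/spu/myctx/mfc", O_RDWR);
 *	write(fd, &cmd, sizeof(cmd));	// queue the transfer
 *	read(fd, &status, 4);		// blocks until a waited tag completes
 */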
static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire_saved(ctx);
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}
static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 mbox_stat;
	u32 data;

	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 ibox_stat;
	u32 data;

	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	spu_acquire(ctx);
	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}
static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
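/*
 * Usage sketch (illustrative): "stat" is one line of space-separated
 * fields; the four time values are in milliseconds (see
 * spufs_acct_time() above).  A monitor could parse it like this, path
 * hypothetical:
 *
 *	char state[16];
 *	unsigned long long user_ms, system_ms, iowait_ms, loaded_ms;
 *	FILE *f = fopen("/spu/myctx/stat", "r");
 *	fscanf(f, "%15s %llu %llu %llu %llu",
 *	       state, &user_ms, &system_ms, &iowait_ms, &loaded_ms);
 */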
struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};