[POWERPC] spufs: clear mapping pointers after last close
/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	spu_acquire(ctx);
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}

static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;

	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							| _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_mem_fops = {
	.open = spufs_mem_open,
	.release = spufs_mem_release,
	.read = spufs_mem_read,
	.write = spufs_mem_write,
	.llseek = generic_file_llseek,
	.mmap = spufs_mem_mmap,
};

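/*
 * Illustrative usage sketch, not part of this file: userspace reaches the
 * SPU local store through this "mem" file, either with plain read()/write()
 * or by mmap()ing it MAP_SHARED (served by spufs_mem_mmap above). The
 * /spu/example path is made up for the example.
 *
 *	int fd = open("/spu/example/mem", O_RDWR);
 *	unsigned int word;
 *	pread(fd, &word, 4, 0);		// first word of local store
 *	pwrite(fd, &word, 4, 0);	// and write it back
 *
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */
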
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* error here usually means a signal.. we might want to test
	 * the error code more precisely though
	 */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static u64 spufs_cntl_get(void *data)
{
	struct spu_context *ctx = data;
	u64 val;

	spu_acquire(ctx);
	val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_close(inode, file);

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};

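/*
 * Illustrative sketch, not part of this file: because "cntl" goes through
 * the simple_attr helpers with the "0x%08lx" format, reads return the SPU
 * status register as an ASCII hex string and writes are parsed as a number
 * and handed to runcntl_write. Paths and values below are made up.
 *
 *	cat /spu/example/cntl		# e.g. prints "0x00000000"
 *	echo 1 > /spu/example/cntl	# runcntl_write(ctx, 1)
 */
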
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open = spufs_regs_open,
	.read = spufs_regs_read,
	.write = spufs_regs_write,
	.llseek = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open = spufs_pipe_open,
	.read = spufs_mbox_read,
};

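/*
 * Usage sketch (illustrative only; the mbox_fd name is made up): mbox
 * reads never block, so when the mailbox is empty the read returns
 * -EAGAIN and callers check mbox_stat or simply retry.
 *
 *	unsigned int data;
 *	ssize_t n = read(mbox_fd, &data, 4);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// mailbox empty, try again later
 */
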
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open = spufs_pipe_open,
	.read = spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open = spufs_pipe_open,
	.read = spufs_ibox_read,
	.poll = spufs_ibox_poll,
	.fasync = spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open = spufs_pipe_open,
	.read = spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open = spufs_pipe_open,
	.write = spufs_wbox_write,
	.poll = spufs_wbox_poll,
	.fasync = spufs_wbox_fasync,
};

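/*
 * Usage sketch (illustrative only; wbox_fd is made up): a blocking writer
 * can rely on spufs_wait above, or use poll() for POLLOUT before pushing
 * 4-byte values with O_NONBLOCK set.
 *
 *	struct pollfd pfd = { .fd = wbox_fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);		// wait for mailbox space
 *	write(wbox_fd, &data, 4);
 */
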
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open = spufs_pipe_open,
	.read = spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

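/*
 * Usage sketch (illustrative only; signal1_fd is made up): writing four
 * bytes performs a signal notification from the PPE side, much like an
 * sndsig DMA from another SPE would. Whether the value replaces or is
 * ORed into the register depends on the mode set through signal1_type.
 *
 *	unsigned int bits = 0x1;
 *	write(signal1_fd, &bits, 4);
 */
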
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 __spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->ops->signal1_type_get(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal1_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
			spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 __spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->ops->signal2_type_get(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal2_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
			spufs_signal2_type_set, "%llu");

#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap = spufs_mss_mmap,
};

static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap = spufs_psmap_mmap,
};

#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	spin_unlock(&ctx->mapping_lock);
	smp_wmb();
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if at least one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait)) {
			ret = -EAGAIN;
		} else {
			ctx->tagwait &= ~status;
			/* clear the stale -EINVAL so the copy_to_user
			   path below is reached on success */
			ret = 0;
		}
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	spu_acquire_runnable(ctx, 0);
	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out:
	return ret;
}

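/*
 * Illustrative example, not part of this file (field values made up):
 * userspace queues a DMA by writing one complete mfc_dma_command that
 * satisfies spufs_check_valid_dma above, then reads back a tag status
 * word, which blocks until the tag group completes.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0,			// local store address
 *		.ea   = (uint64_t)buf,		// same 16-byte alignment as lsa
 *		.size = 16384,			// at most 16kB per command
 *		.tag  = 1,			// tags 0-15 are for userspace
 *		.cmd  = MFC_GET_CMD,		// transfer into local store
 *	};
 *	write(mfc_fd, &cmd, sizeof cmd);
 *	unsigned int tagstatus;
 *	read(mfc_fd, &tagstatus, 4);
 */
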
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		 free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read = spufs_mfc_read,
	.write = spufs_mfc_write,
	.poll = spufs_mfc_poll,
	.flush = spufs_mfc_flush,
	.fsync = spufs_mfc_fsync,
	.fasync = spufs_mfc_fasync,
	.mmap = spufs_mfc_mmap,
};

static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

static u64 spufs_npc_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire(ctx);
	ret = ctx->ops->npc_read(ctx);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
			"0x%llx\n")

static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 __spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}

static u64 spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_decr_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
			"0x%llx\n")

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr_status.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 __spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr_status.slot[0];
}

static u64 spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_decr_status_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
			spufs_decr_status_set, "0x%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 __spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

static u64 spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_event_mask_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
			spufs_event_mask_set, "0x%llx\n")

static u64 __spufs_event_status_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}

static u64 spufs_event_status_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret = 0;

	spu_acquire_saved(ctx);
	ret = __spufs_event_status_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
			NULL, "0x%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_srr0_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->srr0.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
			"0x%llx\n")

static u64 spufs_id_get(void *data)
{
	struct spu_context *ctx = data;
	u64 num;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;
	spu_release(ctx);

	return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

static u64 __spufs_object_id_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->object_id;
}

static u64 spufs_object_id_get(void *data)
{
	/* FIXME: Should there really be no locking here? */
	return __spufs_object_id_get(data);
}

static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
			spufs_object_id_set, "0x%llx\n");

static u64 __spufs_lslr_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->csa.priv2.spu_lslr_RW;
}

static u64 spufs_lslr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire_saved(ctx);
	ret = __spufs_lslr_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 mbox_stat;
	u32 data;

	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				    size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 ibox_stat;
	u32 data;

	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				    size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				    size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{},
};

struct tree_descr spufs_dir_nosched_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{},
};

struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, 128 * 16 },
	{ "fpcr", __spufs_fpcr_read, NULL, 16 },
	{ "lslr", NULL, __spufs_lslr_get, 11 },
	{ "decr", NULL, __spufs_decr_get, 11 },
	{ "decr_status", NULL, __spufs_decr_status_get, 11 },
	{ "mem", __spufs_mem_read, NULL, 256 * 1024, },
	{ "signal1", __spufs_signal1_read, NULL, 4 },
	{ "signal1_type", NULL, __spufs_signal1_type_get, 2 },
	{ "signal2", __spufs_signal2_read, NULL, 4 },
	{ "signal2_type", NULL, __spufs_signal2_type_get, 2 },
	{ "event_mask", NULL, __spufs_event_mask_get, 8 },
	{ "event_status", NULL, __spufs_event_status_get, 8 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, 4 },
	{ "ibox_info", __spufs_ibox_info_read, NULL, 4 },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 16 },
	{ "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
	{ "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
	{ "object-id", NULL, __spufs_object_id_get, 19 },
	{ },
};
int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;