arch/powerpc/platforms/cell/spufs/file.c

/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/uaccess.h>

#include "spufs.h"

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->local_store = inode->i_mapping;
	return 0;
}

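/*
 * Reads and writes on the "mem" file operate on the SPU local store:
 * a read copies out of the current local store image (LS_SIZE bytes),
 * a write copies in at *pos.  Both hold the context lock so the
 * storage returned by get_ls() cannot move underneath the copy.
 */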
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	size = min_t(ssize_t, LS_SIZE - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

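/*
 * Userspace usage sketch (hypothetical paths; spufs is typically
 * mounted at /spu and the context directory is created by the
 * application via spu_create):
 *
 *	int fd = open("/spu/myctx/mem", O_RDWR);
 *	pwrite(fd, program, program_size, 0);	// load code into local store
 *	pread(fd, buf, 16, 0x100);		// inspect local store contents
 */
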
#ifdef CONFIG_SPUFS_MMAP
static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
		      unsigned long address, int *type)
{
	struct page *page = NOPAGE_SIGBUS;

	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED)
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
	else
		page = pfn_to_page((ctx->spu->local_store_phys + offset)
				   >> PAGE_SHIFT);

	spu_release(ctx);

	if (type)
		*type = VM_FAULT_MINOR;

	page_cache_get(page);
	return page;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopage = spufs_mem_mmap_nopage,
};

static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* FIXME: */
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}
#endif

static struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.read	 = spufs_mem_read,
	.write	 = spufs_mem_write,
	.llseek	 = generic_file_llseek,
#ifdef CONFIG_SPUFS_MMAP
	.mmap	 = spufs_mem_mmap,
#endif
};

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type, unsigned long ps_offs)
{
	struct page *page = NOPAGE_SIGBUS;
	int fault_type = VM_FAULT_SIGBUS;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	unsigned long area;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= 0x4000)
		goto out;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		goto out;

	area = ctx->spu->problem_phys + ps_offs;
	page = pfn_to_page((area + offset) >> PAGE_SHIFT);
	fault_type = VM_FAULT_MINOR;
	page_cache_get(page);

	spu_release(ctx);

  out:
	if (type)
		*type = fault_type;

	return page;
}

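/*
 * All problem state mappings below go through spufs_ps_nopage() with a
 * fixed per-area offset into the problem state space: 0x0000 (MFC DMA,
 * "mss"), 0x3000 ("mfc"), 0x4000 ("cntl"), 0x14000 ("signal1") and
 * 0x1c000 ("signal2").  The context must be runnable, since the pages
 * come from the physical SPU the context is loaded on.
 */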
static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x4000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopage = spufs_cntl_mmap_nopage,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 * Mapping this area requires that the application have CAP_SYS_RAWIO,
 * as these registers require special care when read/writing.
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#endif

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->cntl = inode->i_mapping;
	return 0;
}

static ssize_t
spufs_cntl_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	/* FIXME: read from spu status */
	return -EINVAL;
}

static ssize_t
spufs_cntl_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	/* FIXME: write to runctl bit */
	return -EINVAL;
}

static struct file_operations spufs_cntl_fops = {
	.open	= spufs_cntl_open,
	.read	= spufs_cntl_read,
	.write	= spufs_cntl_write,
#ifdef CONFIG_SPUFS_MMAP
	.mmap	= spufs_cntl_mmap,
#endif
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);

	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek	 = generic_file_llseek,
};

static ssize_t
spufs_fpcr_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));

	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static struct file_operations spufs_fpcr_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_fpcr_read,
	.write	 = spufs_fpcr_write,
	.llseek	 = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

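/*
 * The mailbox files come in pairs: "mbox"/"ibox"/"wbox" transfer one
 * 32-bit mailbox word per call, while the matching "*_stat" files
 * expose the entry counts from the single mailbox status register
 * (mbox in bits 0-7, wbox in bits 8-15, ibox in bits 16-23).
 */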
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data;
	int ret;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ret = ctx->ops->mbox_read(ctx, &mbox_data);
	spu_release(ctx);

	if (!ret)
		return -EAGAIN;

	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

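/*
 * An ibox read returns one 32-bit word from the SPU-to-CPU interrupt
 * mailbox.  With O_NONBLOCK an empty mailbox yields -EAGAIN; otherwise
 * spufs_wait() sleeps on ibox_wq until the interrupt callback above
 * signals that data has arrived.
 */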
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data;
	ssize_t ret;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	ret = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			ret = -EAGAIN;
	} else {
		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}

	spu_release(ctx);

	if (ret)
		return ret;

	ret = 4;
	if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
		ret = -EFAULT;

	return ret;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data;
	int ret;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
		return -EFAULT;

	spu_acquire(ctx);

	ret = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			ret = -EAGAIN;
	} else {
		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	spu_release(ctx);

	return ret ? ret : sizeof wbox_data;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal1 = inode->i_mapping;
	return nonseekable_open(inode, file);
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 data;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	data = ctx->ops->signal1_read(ctx);
	spu_release(ctx);

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

	return 4;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x14000);
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopage = spufs_signal1_mmap_nopage,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}
#endif

static struct file_operations spufs_signal1_fops = {
	.open	= spufs_signal1_open,
	.read	= spufs_signal1_read,
	.write	= spufs_signal1_write,
#ifdef CONFIG_SPUFS_MMAP
	.mmap	= spufs_signal1_mmap,
#endif
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal2 = inode->i_mapping;
	return nonseekable_open(inode, file);
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	data = ctx->ops->signal2_read(ctx);
	spu_release(ctx);

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

	return 4;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x1c000);
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopage = spufs_signal2_mmap_nopage,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* FIXME: */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#endif

static struct file_operations spufs_signal2_fops = {
	.open	= spufs_signal2_open,
	.read	= spufs_signal2_read,
	.write	= spufs_signal2_write,
#ifdef CONFIG_SPUFS_MMAP
	.mmap	= spufs_signal2_mmap,
#endif
};

static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal1_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
			spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal2_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
			spufs_signal2_type_set, "%llu");

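/*
 * DEFINE_SIMPLE_ATTRIBUTE() is the libfs helper that wraps a get/set
 * pair into a struct file_operations with text-based read and write
 * using the given printf format.  The same pattern is used further
 * down for npc, decr, decr_status, spu_tag_mask, event_mask, srr0
 * and phys-id.
 */
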
#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopage = spufs_mss_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 * Mapping this area requires that the application have CAP_SYS_RAWIO,
 * as these registers require special care when read/writing.
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#endif

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);
}

static struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
#ifdef CONFIG_SPUFS_MMAP
	.mmap	 = spufs_mss_mmap,
#endif
};

#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x3000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopage = spufs_mfc_mmap_nopage,
};

/*
 * mmap support for problem state MFC command area [0x3000 - 0x3fff].
 * Mapping this area requires that the application have CAP_SYS_RAWIO,
 * as these registers require special care when read/writing.
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#endif

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	file->private_data = ctx;
	return nonseekable_open(inode, file);
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* see if any tag group we are waiting on is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

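/*
 * Reading the mfc file returns a 32-bit mask of the tag groups that
 * were being waited on (ctx->tagwait) and have now completed; those
 * bits are cleared from tagwait in the process.
 */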
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait)) {
			ret = -EAGAIN;
		} else {
			ctx->tagwait &= ~status;
			ret = 0;
		}
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
			 cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

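/*
 * Summary of the checks above: a command is accepted when the opcode
 * is one of the six basic put/get variants, local store address and
 * effective address agree in their low four bits, transfers that are
 * not a multiple of 16 bytes are naturally aligned for their size,
 * the total size is at most 16 KiB, and the tag is in the 0-15 range
 * left to userspace.
 */
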
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;

out:
	return ret;
}

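/*
 * Userspace usage sketch (hypothetical file descriptor and addresses;
 * fields not listed, such as class, must be zero):
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = ls_offset,	// local store offset of the buffer
 *		.ea   = (uint64_t)buf,	// effective address, same low 4 bits
 *		.size = 4096,		// at most 16 KiB per command
 *		.tag  = 1,		// tags 0-15 belong to userspace
 *		.cmd  = MFC_GET_CMD,	// main storage -> local store
 *	};
 *	uint32_t status;
 *	write(mfc_fd, &cmd, sizeof cmd);	// queue the DMA
 *	read(mfc_fd, &status, 4);		// wait for tag group 1
 */
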
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		 free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
#ifdef CONFIG_SPUFS_MMAP
	.mmap	 = spufs_mfc_mmap,
#endif
};

static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

static u64 spufs_npc_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire(ctx);
	ret = ctx->ops->npc_read(ctx);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")

static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->decr.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
			"%llx\n")

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr_status.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->decr_status.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
			spufs_decr_status_set, "%llx\n")

static void spufs_spu_tag_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->tag_mask.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_spu_tag_mask_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->tag_mask.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
			spufs_spu_tag_mask_set, "%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->event_mask.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
			spufs_event_mask_set, "%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_srr0_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->srr0.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
			"%llx\n")

static u64 spufs_id_get(void *data)
{
	struct spu_context *ctx = data;
	u64 num;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;
	spu_release(ctx);

	return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, 0, "0x%llx\n")

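/*
 * Table of files created in each spufs context directory, with their
 * file_operations and access modes; the empty entry terminates the
 * list for the tree_descr-based superblock fill code.
 */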
struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{},
};