[POWERPC] spufs: Add infrastructure needed for gang scheduling
[linux-2.6/sactl.git] / arch/powerpc/platforms/cell/spufs/run.c
blob 63df8cf4ba1607e13ee8c01cdab8a392da12835b
#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/unistd.h>

#include "spufs.h"
/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->stop_wq);
}
void spufs_dma_callback(struct spu *spu, int type)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		switch (type) {
		case SPE_EVENT_DMA_ALIGNMENT:
		case SPE_EVENT_INVALID_DMA:
			force_sig(SIGBUS, /* info, */ current);
			break;
		case SPE_EVENT_SPE_ERROR:
			force_sig(SIGILL, /* info */ current);
			break;
		}
	}
}
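/*
 * Helper for the run loop below: spu_stopped tells spufs_wait whether
 * there is anything to do, i.e. the SPU is no longer running, the
 * context has lost its physical SPU, a page fault is pending in the
 * MFC DSISR, or a class 0 (error) interrupt is pending.
 */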
static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
{
	struct spu *spu;
	u64 pte_fault;

	*stat = ctx->ops->status_read(ctx);
	if (ctx->state != SPU_STATE_RUNNABLE)
		return 1;
	spu = ctx->spu;
	pte_fault = spu->dsisr &
	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
}
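/*
 * spu_run_init acquires the context in runnable state (scheduling it
 * onto a physical SPU if necessary), loads the next program counter
 * and starts execution through the run control register.
 */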
static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
{
	int ret;

	if ((ret = spu_acquire_runnable(ctx)) != 0)
		return ret;
	ctx->ops->npc_write(ctx, *npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return 0;
}
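/*
 * spu_run_fini reads back the stop status and program counter and
 * releases the context.  A pending signal, or a stop-and-signal with
 * code 0x3fff while being ptraced (which raises SIGTRAP), is turned
 * into -ERESTARTSYS so that sys_spu_run returns to user space.
 */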
static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
			       u32 * status)
{
	int ret = 0;

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
			force_sig(SIGTRAP, current);
			ret = -ERESTARTSYS;
		}
	}
	return ret;
}
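/*
 * spu_reacquire_runnable is used when the context was descheduled
 * while the run loop was waiting: finish the current run, propagate a
 * stop or halt status to the caller, and otherwise restart the SPU
 * from the saved program counter.
 */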
static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
					 u32 *status)
{
	int ret;

	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
		return ret;
	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
		       SPU_STATUS_STOPPED_BY_HALT)) {
		return *status;
	}
	if ((ret = spu_run_init(ctx, npc)) != 0)
		return ret;
	return 0;
}
/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__FUNCTION__, *spu_ret);
		ret = 0;
	}
	return ret;
}
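/*
 * Handle a syscall callback from SPU code: the word at the current
 * program counter holds the local store address of a
 * struct spu_syscall_block.  The system call is performed on the
 * PowerPC side with the context released (so the SPU is not pinned
 * while the call sleeps), the return value is written back into local
 * store and the SPU is restarted behind the pointer word.
 */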
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	ls_pointer = *(u32*)(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy(&s, ls + ls_pointer, sizeof (s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
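/*
 * spu_process_events runs the deferred work for pending class 1
 * (translation fault) and class 0 (error) interrupts and checks for
 * signals before the run loop goes back to sleep.
 */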
static inline int spu_process_events(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
	int ret = 0;

	if (spu->dsisr & pte_fault)
		ret = spu_irq_class_1_bottom(spu);
	if (spu->class_0_pending)
		ret = spu_irq_class_0_bottom(spu);
	if (!ret && signal_pending(current))
		ret = -ERESTARTSYS;
	return ret;
}
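/*
 * Main loop behind the spu_run system call: start the SPU, wait for
 * it to stop, and handle the reason for the stop.  A stop-and-signal
 * with code 0x2104 is treated as a syscall callback request, a lost
 * physical SPU triggers rescheduling, and anything else ends the run
 * and reports the status word back to user space.
 */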
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ctx->event_return = 0;
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out;
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->runcntl_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	if (!ret)
		ret = status;
	spu_yield(ctx);

out:
	*event = ctx->event_return;
	up(&ctx->run_sema);
	return ret;
}