/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) {	\
	    cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
	else break;					\
    } while (_c);					\
  }

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
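
/*
 * Illustrative use (this exact pattern appears throughout this file):
 * spin until the SPU stops running, yielding the CPU whenever the
 * condition still holds after RELAX_SPIN_COUNT iterations:
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 */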
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *    Acquire SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *    Release SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If SPU_Status[E,L,IS] any field is '1', this
	 *     SPU is in isolate state and cannot be context
	 *     saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Synchronize all three interrupts to be sure
	 *     we no longer execute a handler on another CPU.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);

	/*
	 * This flag needs to be set before calling synchronize_irq so
	 * that the update will be visible to the relevant handlers
	 * via a simple load.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
	synchronize_irq(spu->irqs[0]);
	synchronize_irq(spu->irqs[1]);
	synchronize_irq(spu->irqs[2]);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26:
	 *     Set a software watchdog timer, which specifies the
	 *     maximum allowable time for a context save sequence.
	 *
	 *     For the present, this implementation will not set a global
	 *     watchdog timer, as virtualization & variable system load
	 *     may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 *     Done above in Step 3 - disable_interrupts().
	 */
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) &
				~MFC_CNTL_SUSPEND_DMA_QUEUE &
				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
}
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}
static inline void save_mfc_stopped_status(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
			MFC_CNTL_DMA_QUEUES_EMPTY;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds].  Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
	 *
	 * update: do the same with MFC_CNTL[Q].
	 */
	csa->priv2.mfc_control_RW &= ~mask;
	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *     Read PPE Timebase High and Timebase low registers
	 *     and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the Prxy_TagStatus register in the CSA.
	 *
	 * It is unnecessary to restore dma_tagstatus_R, however,
	 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
	 * we must save it.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 24.
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW,
			MFC_CNTL_PURGE_DMA_REQUEST |
			MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15.
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *     Reset the SPU_LSLR register to its default value.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
		spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
		spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 26a - moved from 44:
	 *     Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
		unsigned int *code, int code_size)
{
	/* Save, Step 47:
	 * Restore, Step 30.
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     to provide access to SPU context save code and
	 *     LSCSA.
	 *
	 *     This implementation places both the context
	 *     switch code and LSCSA in kernel address space.
	 *
	 *     Further this implementation assumes that the
	 *     MFC_SR1[R]=1 (in other words, assume that
	 *     translation is desired by OS environment).
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
	 *     to context switch active.  This implementation
	 *     does not use a switch active flag.
	 *
	 * Now that we have saved the mfc in the csa, we can add in the
	 * restart command if an exception occurred.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by this implementation.
	 *
	 *     This implementation enables only class1
	 *     (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
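
/*
 * Illustrative call (mirrors save_ls_16kb() below): PUT the first 16kb
 * of local store into the CSA with tag 0 and resource class 0.  The
 * loop above splits any transfer larger than MFC_MAX_DMA_SIZE into
 * multiple MFC commands:
 *
 *	send_mfc_dma(spu, (unsigned long)&csa->lscsa->ls[0],
 *		     0x0, 16384, 0, 0, MFC_PUT_CMD);
 */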
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *     point address of context save code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to copy context save code
	 *     to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 45.
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *     Complete Interrupt.  Write INT_Stat_Class0 or
	 *     INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *     or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context save succeeded, otherwise context save
	 *     failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *     If required, notify the "using application" that
	 *     the SPU task has been terminated.  TBD.
	 */
}
static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
	 *     the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *     Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *     If SPU_Status[R]=1, stop SPU execution
	 *     and wait for stop to complete.
	 *
	 *     Returns       1 if SPU_Status[R]=1 on entry.
	 *                   0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *     If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *     release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 *     instruction sequence to the end of the SPU based restore
	 *     code (after the "context restored" stop and signal) to
	 *     restore the correct SPU status.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}
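
/*
 * Note on the ordering above (illustrative summary): the combined
 * two-bit cases are tested before the single-bit cases, so a context
 * stopped with, e.g., both SPU_Status[P] and SPU_Status[I] set is
 * resumed with the combined SPU_STOPPED_STATUS_P_I sequence rather
 * than the plain P or I variant.
 */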
static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *     add a 'br *' instruction to the end of
	 *     the SPU based restore code.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}
static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue MFC DMA command to copy context
	 *     restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *     running) then adjust decrementer, set
	 *     decrementer running status in LSCSA,
	 *     and set decrementer "wrapped" status
	 *     in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}
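
/*
 * Worked example (illustrative): if the saved decrementer held 100
 * cycles but 150 cycles of timebase elapsed while switched out, then
 * decr.slot[0] < delta_time, so SPU_DECR_STATUS_WRAPPED is OR'd into
 * decr_status.slot[0] and the subtraction above underflows; the
 * SPU-side restore code uses the "wrapped" status to recover.
 */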
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context restore succeeded, otherwise context restore
	 *     failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
	 *     restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 47.
	 *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
	 *     the MFC.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with value of -1.
	 *     Write INT_STAT_class1 with value of -1.
	 *     Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Se]!=0 then restore
	 *     MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *      2. If CSA.SPU_Channel_0_Count=0 and
	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *         CSA.SPU_Event_Status[Lr]=0 then set
	 *         CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}
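
/*
 * Example (illustrative): a context saved with channel 0 count = 0,
 * Lr clear in the saved event status (channel 0 data), and Lr set in
 * its event mask (channel 1 data) gets its channel 0 count forced to
 * 1, so the restored SPU observes the lock line reservation lost
 * event injected above.
 */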
static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *     If the status of the CSA software decrementer
	 *     "wrapped" flag is set, OR in a '1' to
	 *     CSA.SPU_Event_Status[Tm].
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *	Restore the following CH: [0,3,4,24,25,27]
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}
static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23].
	 */
	ch_counts[0] = 1UL;
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	ch_counts[2] = 1UL;
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}
static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *     read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}
static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	spu_cpu_affinity_set(spu, ctx->last_ran);
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU. TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}
static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *     Restore the MFC_CNTL register from the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * The queue is put back into the same state that was evident prior to
	 * the context switch. The suspend flag is added to the saved state in
	 * the csa, if the operational state was suspending or suspended. In
	 * this case, the code that suspended the mfc is responsible for
	 * continuing it. Note that SPE faults do not change the operational
	 * state of the spu.
	 */
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state. TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 *     Not performed by this implementation.
	 */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}
static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);	        /* Step 3. */
	set_watchdog_timer(prev, spu);	        /* Step 4. */
	inhibit_user_access(prev, spu);	        /* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);	        /* Step 7. */
	save_mfc_cntl(prev, spu);		/* Step 8. */
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
	save_mfc_stopped_status(prev, spu);     /* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);	        /* Step 16. */
	issue_mfc_tlbie(prev, spu);	        /* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_ppu_tagstatus(prev, spu);  /* NEW.     */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	save_mfc_cmd(prev, spu);	/* Step 26a - moved from 44. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);	        /* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);	        /* Step 43. */
	reset_ch(prev, spu);	        /* Step 45. */
}
static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	/* Step 47. */
	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);	        /* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}
static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			& SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using SPU for first time.
	 */

	disable_interrupts(prev, spu);	        /* Step 2.  */
	inhibit_user_access(prev, spu);	        /* Step 3.  */
	terminate_spu_app(prev, spu);	        /* Step 4.  */
	set_switch_pending(prev, spu);	        /* Step 5.  */
	stop_spu_isolate(spu);			/* NEW.     */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc_and_halt_decr(prev, spu);	/* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))	        /* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);	        /* Step 11. */
	issue_mfc_tlbie(prev, spu);	        /* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);	        /* Step 14. */
	wait_purge_complete(prev, spu);	        /* Step 15. */
	reset_spu_privcntl(prev, spu);	        /* Step 16. */
	reset_spu_lslr(prev, spu);              /* Step 17. */
	setup_mfc_sr1(prev, spu);	        /* Step 18. */
	spu_invalidate_slbs(spu);		/* Step 19. */
	reset_ch_part1(prev, spu);	        /* Step 20. */
	reset_ch_part2(prev, spu);	        /* Step 21. */
	enable_interrupts(prev, spu);	        /* Step 22. */
	set_switch_active(prev, spu);	        /* Step 23. */
	set_mfc_tclass_id(prev, spu);	        /* Step 24. */
	resume_mfc_queue(prev, spu);	        /* Step 25. */
}
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);	        /* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);	        /* Step 29. */
	/* Step 30. */
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);	                /* Step 31. */
	set_signot1(next, spu);		        /* Step 32. */
	set_signot2(next, spu);		        /* Step 33. */
	setup_decr(next, spu);		        /* Step 34. */
	setup_ppu_mb(next, spu);	        /* Step 35. */
	setup_ppuint_mb(next, spu);	        /* Step 36. */
	send_restore_code(next, spu);	        /* Step 37. */
	set_ppu_querymask(next, spu);	        /* Step 38. */
	wait_tag_complete(next, spu);	        /* Step 39. */
	wait_spu_stopped(next, spu);	        /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);	        /* Step 44. */
	wait_tag_complete(next, spu);	        /* Step 45. */
	suspend_mfc(next, spu);	                /* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);	        /* Step 48. */
	clear_interrupts(next, spu);	        /* Step 49. */
	restore_mfc_queues(next, spu);	        /* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);	        /* Step 53. */
	restore_mfc_csr_cmd(next, spu);	        /* Step 54. */
	restore_mfc_csr_ato(next, spu);	        /* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);	        /* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);	        /* Step 59. */
	restore_ch_part2(next, spu);	        /* Step 60. */
	restore_spu_lslr(next, spu);	        /* Step 61. */
	restore_spu_cfg(next, spu);	        /* Step 62. */
	restore_pm_trace(next, spu);	        /* Step 63. */
	restore_spu_npc(next, spu);	        /* Step 64. */
	restore_spu_mb(next, spu);	        /* Step 65. */
	check_ppu_mb_stat(next, spu);	        /* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	spu_invalidate_slbs(spu);		/* Modified Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
	set_int_route(next, spu);		/* NEW      */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
	enable_user_access(next, spu);	        /* Step 73. */
	reset_switch_active(next, spu);	        /* Step 74. */
	reenable_interrupts(next, spu);	        /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-16].
	 *     (b) save of CSA, performed by PPE [steps 17-42]
	 *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiesce SPU
	 *              53 if SPU-side of save failed.
	 */

	rc = quiece_spu(prev, spu);	        /* Steps 2-16. */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
		break;
	case 0:
		break;
	}
	save_csa(prev, spu);	        /* Steps 17-43. */
	save_lscsa(prev, spu);	        /* Steps 44-53. */
	return check_save_status(prev, spu);	/* Step 54.    */
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *    (a) harvest (or reset) SPU [steps 2-24].
	 *    (b) restore LSCSA [steps 25-40], mostly performed by SPU.
	 *    (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed below.
	 */

	restore_lscsa(next, spu);	        /* Steps 24-39. */
	rc = check_restore_status(next, spu);	/* Step 40.     */
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
		break;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}
2096 * @prev: pointer to SPU context save area, to be saved.
2097 * @spu: pointer to SPU iomem structure.
2099 * Acquire locks, perform the save operation then return.
2101 int spu_save(struct spu_state
*prev
, struct spu
*spu
)
2105 acquire_spu_lock(spu
); /* Step 1. */
2106 rc
= __do_spu_save(prev
, spu
); /* Steps 2-53. */
2107 release_spu_lock(spu
);
2108 if (rc
!= 0 && rc
!= 2 && rc
!= 6) {
2109 panic("%s failed on SPU[%d], rc=%d.\n",
2110 __func__
, spu
->number
, rc
);
2114 EXPORT_SYMBOL_GPL(spu_save
);
2117 * spu_restore - SPU context restore, with harvest and locking.
2118 * @new: pointer to SPU context save area, to be restored.
2119 * @spu: pointer to SPU iomem structure.
2121 * Perform harvest + restore, as we may not be coming
2122 * from a previous successful save operation, and the
2123 * hardware state is unknown.
2125 int spu_restore(struct spu_state
*new, struct spu
*spu
)
2129 acquire_spu_lock(spu
);
2131 spu
->slb_replace
= 0;
2132 rc
= __do_spu_restore(new, spu
);
2133 release_spu_lock(spu
);
2135 panic("%s failed on SPU[%d] rc=%d.\n",
2136 __func__
, spu
->number
, rc
);
2140 EXPORT_SYMBOL_GPL(spu_restore
);
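
/*
 * Illustrative usage (a sketch, not part of this file): a scheduler
 * pairs these entry points when moving contexts between SPUs, where
 * 'old_csa' and 'new_csa' are hypothetical caller-owned save areas:
 *
 *	spu_save(old_csa, spu);
 *	spu_restore(new_csa, spu);
 */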
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}

void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}
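
/*
 * Illustrative usage (a sketch, not from this file): callers pair
 * spu_init_csa()/spu_fini_csa() around the lifetime of a context,
 * where 'csa' is a hypothetical caller-owned save area:
 *
 *	struct spu_state csa;
 *
 *	if (spu_init_csa(&csa))
 *		return -ENOMEM;
 *	...
 *	spu_fini_csa(&csa);
 */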