#include <linux/export.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
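/*
 * SPU context-switch support for the Cell Broadband Engine.  The save,
 * harvest, and restore helpers below follow the numbered context-switch
 * sequence from the CBEA hardware documentation; the "Save, Step N" and
 * "Restore, Step N" comments refer to that sequence.  spu_save_dump.h and
 * spu_restore_dump.h carry the SPU-side code images that the sequence
 * DMAs into local store.
 */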
#define RELAX_SPIN_COUNT	1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
		cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
    } while (_c);					\
  }

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
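/*
 * The polling macros busy-wait on an MMIO condition, yielding the CPU if
 * the condition persists past RELAX_SPIN_COUNT iterations.  Typical use,
 * as in wait_purge_complete() below:
 *
 *	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 *			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
 *			 MFC_CNTL_PURGE_DMA_COMPLETE);
 */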
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *    Acquire SPU-specific mutual exclusion lock.  TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *    Release SPU-specific mutual exclusion lock.  TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If SPU_Status[E,L,IS] any field is '1', stop-and-signal.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0, class1 and class2 in the CSA,
	 *     then write zero to all three masks.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);

	/*
	 * This flag needs to be set before calling synchronize_irq so
	 * that the update will be visible to the relevant handlers
	 * via a simple load.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
	synchronize_irq(spu->irqs[0]);
	synchronize_irq(spu->irqs[1]);
	synchronize_irq(spu->irqs[2]);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 25.
	 *    Set a software watchdog timer.  Not implemented here.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this SPU by
	 *     unmapping the virtual pages assigned to the SPU MMIO
	 *     problem state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 *     Done above in disable_interrupts().
	 */
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) &
				~MFC_CNTL_SUSPEND_DMA_QUEUE &
				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
}
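/*
 * Note on save_mfc_cntl() above: MFC_CNTL[Ss] can be in one of three
 * states.  If a suspend is already in progress we wait for it to finish
 * and fall through to the suspended case; if DMA is running normally we
 * request the suspend ourselves and record in the CSA that no suspend
 * was pending at save time, so restore will not re-suspend the queue.
 */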
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 10:
	 *     Save SPU_Status in the CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		/* The SPU is still running; stop it and check why
		 * it stopped.
		 */
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}
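/*
 * Note on save_spu_status() above: after our stop request, the SPU may
 * have stopped for a reason of its own (stop-and-signal, halt, invalid
 * instruction, single step).  If none of those bits is set, the SPU was
 * stopped only by our request, so SPU_STATUS_RUNNING is recorded as the
 * application-visible state to be reinstated on restore.
 */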
static inline void save_mfc_stopped_status(struct spu_state *csa,
					   struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
			 MFC_CNTL_DMA_QUEUES_EMPTY;

	csa->priv2.mfc_control_RW &= ~mask;
	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
}
static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *     Read the PPE timebase and save in the CSA, so the
	 *     decrementer can be adjusted at restore time.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register. Poll SPU_MSSync[P]=0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 *     Handle any pending interrupts from this SPU here.
	 *     This is OS or hypervisor specific.
	 */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}
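/*
 * The loop bounds above reflect the MFC command-queue geometry: eight
 * PU-initiated (puq) and sixteen SPU-initiated (spuq) queue entries,
 * each held in four 64-bit data words.
 */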
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	spu_mfc_tclass_id_set(spu, 0x10000000);
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_PURGE_DMA_REQUEST | MFC_CNTL_SUSPEND_MASK);
}
static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 *     Poll MFC_CNTL[Ps] until the purge completes.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_privcntl_RW, 0UL);
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}
static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save PM_Trace_Tag_Wait_Mask.  Not performed by
	 *     this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 39:
	 *     Save the resource allocation group ID and enable
	 *     registers in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42: */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *     Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
				  unsigned int *code, int code_size)
{
	/* Save, Step 47:
	 * Restore, Step 30.
	 *     Invalidate the SLBs, then map the LSCSA and the
	 *     save/restore code image into the kernel address space.
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag to
	 *     context switch active.  If a DMA fault was pending,
	 *     arrange for the restore to restart the faulting command.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 24.
	 *     Reset and then enable interrupts, as needed by this
	 *     implementation.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}
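/*
 * send_mfc_dma() below drives the problem-state MFC command interface:
 * write the local-store address and effective address, then the fused
 * size/tag/class/command word, and retry while the command status
 * reports the queue busy or full (low two status bits).  A minimal
 * caller sketch, assuming a hypothetical 1 KB buffer:
 *
 *	send_mfc_dma(spu, (unsigned long)buf, 0, 1024, 0, 0, MFC_PUT_CMD);
 */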
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x3)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to the entry
	 *     point of the save/restore code image (LSA 0).
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union { u64 ull; u32 ui[2]; } addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *     Write SPU_Sig_Notify_1 register with upper 32-bits
	 *     of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union { u64 ull; u32 ui[2]; } addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *     Write SPU_Sig_Notify_2 register with lower 32-bits
	 *     of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
}
static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll MFC_TagStatus[gn] until 01 (Tag group 0 complete).
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 58:
	 * Restore, Step 41.
	 *     Poll until SPU_Status[R]=0.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}
static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *     If SPU_Status[P]=1 and SPU_Status[StopCode]=
	 *     SPU_SAVE_COMPLETE, the context save succeeded.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
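/*
 * The completion word above encodes the expected SPU_Status after the
 * SPU-side save code runs to completion: stopped-by-stop with the
 * SPU_SAVE_COMPLETE stop code in the upper halfword.  Anything else
 * means the save failed.
 */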
static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Harvest, Step 4:
	 *     Terminate any currently executing SPU applications.  TBD.
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Harvest, Step 6:
	 *     Suspend DMA and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Harvest, Step 7:
	 * Restore, Step 47:
	 *     Poll MFC_CNTL[Ss] until the suspend completes.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Harvest, Step 8:
	 *     If SPU_Status[R]=1, stop SPU execution and wait for the
	 *     stop to complete.
	 *
	 *     Returns 1 if SPU_Status[R]=1 on entry, 0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Harvest, Step 10:
	 *     If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, drive the
	 *     SPU out of isolate state before continuing.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Harvest, Step 20:
	 *     Reset the following CH: [0,3,4,24,25,27]
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Harvest, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If CSA.SPU_Status[I,S,H,P]=1, record in the LSCSA the
	 *     stopped state (and the stop code, where one applies)
	 *     that the SPU-side restore code must re-establish on exit.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;
	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;
	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;
	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;
	} else if ((csa->prob.spu_status_R & status_P) == status_P) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;
	} else if ((csa->prob.spu_status_R & status_H) == status_H) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
	} else if ((csa->prob.spu_status_R & status_S) == status_S) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
	} else if ((csa->prob.spu_status_R & status_I) == status_I) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
	}
}
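/*
 * Note on the chain above: combined status patterns (P_I, P_H, S_P,
 * S_I) are tested before single-bit patterns, so the most specific
 * stopped state wins; the 16-bit stop code is preserved alongside it
 * wherever a stop-and-signal was involved.
 */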
static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If CSA.SPU_Status[I,S,H,P,R]=0, record the "restored"
	 *     state for the SPU-side restore code.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	spu_resource_allocation_groupID_set(spu,
		csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
		csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If the decrementer was running at save time, adjust the
	 *     saved value by the time that elapsed while the context
	 *     was switched out.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}
		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}
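/*
 * Worked example for setup_decr(): if the saved decrementer held 100
 * timebase ticks and 150 ticks elapsed while switched out, the
 * subtraction underflows; SPU_DECR_STATUS_WRAPPED tells the SPU-side
 * restore code that the decrementer expired while the context was out.
 */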
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the saved PU mailbox data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the saved PU interrupt mailbox data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[StopCode]=
	 *     SPU_RESTORE_COMPLETE, the context restore succeeded.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If CSA.SPU_Status[I,S,H,P]=1, restart the SPU so the
	 *     SPU-side code can replay the recorded stopped state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 44:
	 *     If CSA.SPU_Status[I,S,H,P,R]=0, run the "restored"
	 *     sequence: start the SPU, wait for it to start running,
	 *     then stop it again.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 58:
	 *     Set the Lock Line Reservation Lost Event in the saved
	 *     event status; if the event is unmasked and not already
	 *     pending, raise the channel 0 count so the SPU sees it.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 59:
	 *     If the saved decrementer wrapped while the context was
	 *     switched out, post the decrementer event (0x20) in the
	 *     saved event status, raising the channel 0 count if the
	 *     event is unmasked and not already pending.
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}
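/*
 * In the two helpers above, saved channel 0 holds SPU_RdEventStat
 * (pending events) and saved channel 1 holds SPU_WrEventMask; 0x20 is
 * taken to be the SPU decrementer event bit in those words, mirroring
 * the MFC_LLR_LOST_EVENT handling for lock-line reservations.
 */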
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [0,3,4,24,25,27]
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 61:
	 *     Restore the following CH: [9,21,23]
	 */
	ch_counts[0] = 1UL;
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	ch_counts[2] = 1UL;
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 64:
	 *     Restore PM_Trace_Tag_Wait_Mask.  Not performed by
	 *     this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
}
static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty), drain the
	 *     PPU mailbox by reading it once.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty), drain the PPU
	 *     interrupt mailbox and clear the pending interrupt.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
}
static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	spu_cpu_affinity_set(spu, ctx->last_ran);
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 75:
	 *     Restore other SPU mappings to this SPU.  TBD.
	 */
}
static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
}
static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Re-enable user-space access (if provided) to this SPU by
	 *     mapping the virtual pages assigned to the SPU MMIO
	 *     problem state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Turn off the software context switch active flag.
	 */
	clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	mb();
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 76:
	 *     Re-enable SPU interrupts from the saved masks.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}
static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of the SPU context save sequence, which
	 * quiesce the SPU.  Returns 0 on success, or 2 or 6 if the
	 * isolate check of that step failed.
	 */
	if (check_spu_isolate(prev, spu)) {	/* Step 2.  */
		return 2;
	}
	disable_interrupts(prev, spu);		/* Step 3.  */
	set_watchdog_timer(prev, spu);		/* Step 4.  */
	inhibit_user_access(prev, spu);		/* Step 5.  */
	if (check_spu_isolate(prev, spu)) {	/* Step 6.  */
		return 6;
	}
	set_switch_pending(prev, spu);		/* Step 7.  */
	save_mfc_cntl(prev, spu);		/* Step 8.  */
	save_spu_runcntl(prev, spu);		/* Step 9.  */
	save_mfc_sr1(prev, spu);		/* Step 10. */
	save_spu_status(prev, spu);		/* Step 11. */
	save_mfc_stopped_status(prev, spu);	/* Step 12. */
	halt_mfc_decr(prev, spu);		/* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);		/* Step 16. */
	issue_mfc_tlbie(prev, spu);		/* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}
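/*
 * quiece_spu() performs save steps 2-18.  On failure it returns the
 * number of the failing isolate check (2 or 6) so the callers can
 * distinguish a context that must be harvested from a fatal error;
 * see the rc handling in __do_spu_save() and spu_save() below.
 */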
static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of the SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */
	save_mfc_queues(prev, spu);
	save_ppu_querymask(prev, spu);
	save_ppu_querytype(prev, spu);
	save_ppu_tagstatus(prev, spu);
	save_mfc_csr_tsq(prev, spu);
	save_mfc_csr_cmd(prev, spu);
	save_mfc_csr_ato(prev, spu);
	save_mfc_tclass_id(prev, spu);
	set_mfc_tclass_id(prev, spu);
	save_mfc_cmd(prev, spu);
	purge_mfc_queue(prev, spu);
	wait_purge_complete(prev, spu);
	setup_mfc_sr1(prev, spu);
	save_spu_npc(prev, spu);
	save_spu_privcntl(prev, spu);
	reset_spu_privcntl(prev, spu);
	save_spu_lslr(prev, spu);
	reset_spu_lslr(prev, spu);
	save_spu_cfg(prev, spu);
	save_pm_trace(prev, spu);
	save_mfc_rag(prev, spu);
	save_ppu_mb_stat(prev, spu);
	save_ppu_mb(prev, spu);
	save_ppuint_mb(prev, spu);
	save_ch_part1(prev, spu);
	save_spu_mb(prev, spu);
	reset_ch(prev, spu);
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 45-57 of the SPU context save sequence, which
	 * save regions of the local store and register file.
	 */
	resume_mfc_queue(prev, spu);
	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);
	enable_interrupts(prev, spu);
	save_ls_16kb(prev, spu);
	set_spu_npc(prev, spu);
	set_signot1(prev, spu);
	set_signot2(prev, spu);
	send_save_code(prev, spu);
	set_ppu_querymask(prev, spu);
	wait_tag_complete(prev, spu);
	wait_spu_stopped(prev, spu);
}
static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			  & SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolate
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of the SPU context restore sequence,
	 * which resets an SPU either after a failed save, or when
	 * using an SPU for the first time.
	 */
	disable_interrupts(prev, spu);
	inhibit_user_access(prev, spu);
	terminate_spu_app(prev, spu);
	set_switch_pending(prev, spu);
	stop_spu_isolate(spu);
	remove_other_spu_access(prev, spu);
	suspend_mfc_and_halt_decr(prev, spu);
	wait_suspend_mfc_complete(prev, spu);
	if (!suspend_spe(prev, spu))
		clear_spu_status(prev, spu);
	do_mfc_mssync(prev, spu);
	issue_mfc_tlbie(prev, spu);
	handle_pending_interrupts(prev, spu);
	purge_mfc_queue(prev, spu);
	wait_purge_complete(prev, spu);
	reset_spu_privcntl(prev, spu);
	reset_spu_lslr(prev, spu);
	setup_mfc_sr1(prev, spu);
	spu_invalidate_slbs(spu);
	reset_ch_part1(prev, spu);
	reset_ch_part2(prev, spu);
	enable_interrupts(prev, spu);
	set_switch_active(prev, spu);
	set_mfc_tclass_id(prev, spu);
	resume_mfc_queue(prev, spu);
}
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	set_watchdog_timer(next, spu);
	setup_spu_status_part1(next, spu);
	setup_spu_status_part2(next, spu);
	restore_mfc_rag(next, spu);
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);
	set_signot1(next, spu);
	set_signot2(next, spu);
	setup_decr(next, spu);
	setup_ppu_mb(next, spu);
	setup_ppuint_mb(next, spu);
	send_restore_code(next, spu);
	set_ppu_querymask(next, spu);
	wait_tag_complete(next, spu);
	wait_spu_stopped(next, spu);
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of the SPU context restore sequence,
	 * which restore regions of the privileged & problem state areas.
	 */
	restore_spu_privcntl(next, spu);
	restore_status_part1(next, spu);
	restore_status_part2(next, spu);
	restore_ls_16kb(next, spu);
	wait_tag_complete(next, spu);
	suspend_mfc(next, spu);
	wait_suspend_mfc_complete(next, spu);
	issue_mfc_tlbie(next, spu);
	clear_interrupts(next, spu);
	restore_mfc_queues(next, spu);
	restore_ppu_querymask(next, spu);
	restore_ppu_querytype(next, spu);
	restore_mfc_csr_tsq(next, spu);
	restore_mfc_csr_cmd(next, spu);
	restore_mfc_csr_ato(next, spu);
	restore_mfc_tclass_id(next, spu);
	set_llr_event(next, spu);
	restore_decr_wrapped(next, spu);
	restore_ch_part1(next, spu);
	restore_ch_part2(next, spu);
	restore_spu_lslr(next, spu);
	restore_spu_cfg(next, spu);
	restore_pm_trace(next, spu);
	restore_spu_npc(next, spu);
	restore_spu_mb(next, spu);
	check_ppu_mb_stat(next, spu);
	check_ppuint_mb_stat(next, spu);
	spu_invalidate_slbs(spu);
	restore_mfc_sr1(next, spu);
	set_int_route(next, spu);
	restore_other_spu_access(next, spu);
	restore_spu_runcntl(next, spu);
	restore_mfc_cntl(next, spu);
	enable_user_access(next, spu);
	reset_switch_active(next, spu);
	reenable_interrupts(next, spu);
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-18].
	 *     (b) save of CSA, performed by PPE.
	 *     (c) save of LSCSA, mostly performed by SPU.
	 *
	 * Returns 0 on success, or 2 or 6 if failed step 2 or 6.
	 */
	rc = quiece_spu(prev, spu);
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);
	save_lscsa(prev, spu);
	return check_save_status(prev, spu);
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *    (a) harvest (or reset) the SPU, done by the caller.
	 *    (b) restore LSCSA, mostly performed by the SPU.
	 *    (c) restore CSA, performed by the PPE.
	 */
	restore_lscsa(next, spu);
	rc = check_restore_status(next, spu);
	if (rc)
		/* Failed. Return now. */
		return rc;
	restore_csa(next, spu);
	return 0;
}
/**
 * spu_save - SPU context save, with locking.
 * @prev:	pointer to SPU context save area, to be saved.
 * @spu:	pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);		/* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new:	pointer to SPU context save area, to be restored.
 * @spu:	pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
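/*
 * Usage sketch: the spufs scheduler is the expected caller, saving a
 * context when it is unbound from a physical SPU and restoring it on
 * bind, e.g. (hypothetical caller, error handling omitted):
 *
 *	spu_save(&ctx->csa, spu);	// context switched out
 *	...
 *	spu_restore(&new_ctx->csa, spu);	// context switched in
 */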
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}
static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}