#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

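/*
 * Handle an SPE event, depending on the context's
 * SPU_CREATE_EVENTS_ENABLED flag: if the context was created with
 * events enabled, record the event in the return mask and wake any
 * waiters; otherwise deliver an appropriate signal to the process.
 */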
static void spufs_handle_event(struct spu_context *ctx,
				unsigned long ea, int type)
{
	siginfo_t info;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
		return;
	}

	memset(&info, 0, sizeof(info));

	switch (type) {
	case SPE_EVENT_INVALID_DMA:
		info.si_signo = SIGBUS;
		info.si_code = BUS_OBJERR;
		break;
	case SPE_EVENT_SPE_DATA_STORAGE:
		info.si_signo = SIGSEGV;
		info.si_addr = (void __user *)ea;
		info.si_code = SEGV_ACCERR;
		ctx->ops->restart_dma(ctx);
		break;
	case SPE_EVENT_DMA_ALIGNMENT:
		/* DAR isn't set for an alignment fault */
		info.si_signo = SIGBUS;
		info.si_code = BUS_ADRALN;
		break;
	case SPE_EVENT_SPE_ERROR:
		info.si_signo = SIGILL;
		info.si_addr = (void __user *)(unsigned long)
			ctx->ops->npc_read(ctx) - 4;
		info.si_code = ILL_ILLOPC;
		break;
	}

	if (info.si_signo)
		force_sig_info(info.si_signo, &info, current);
}
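
/*
 * Translate pending class 0 (error) interrupts into SPE events and
 * return -EIO so the caller stops the context; 0 when none are pending.
 */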
int spufs_handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_SPE_ERROR);

	ctx->csa.class_0_pending = 0;

	return -EIO;
}
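
/*
 * Bottom half handler for class 1 (translation) faults.  This cannot
 * run from interrupt context since resolving the fault may sleep, and
 * the context lock must be dropped while waiting on the backing store.
 */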
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	unsigned int flt = 0;
	int ret;

	/* dar and dsisr are only valid while a class 1 fault is pending */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering spu_handle_mm_fault */
	spu_release(ctx);

	access = (_PAGE_PRESENT | _PAGE_USER);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dar and dsisr under the context lock after handling the
	 * fault, so that time slicing will not preempt the context while
	 * the page fault handler is running.
	 */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If the fault was handled and the context is runnable, restart
	 * the DMA; otherwise report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->state == SPU_STATE_RUNNABLE)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	return ret;
}
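
/*
 * Illustrative sketch only, not part of the original file: one way a
 * run-control path could consume the two handlers above.  Class 1
 * translation faults are resolved first, since they are recoverable;
 * class 0 errors are fatal and report -EIO so the caller can stop the
 * context.  The function name below is hypothetical.
 */
static inline int spufs_handle_faults_sketch(struct spu_context *ctx)
{
	int ret;

	/* resolve a pending page fault, possibly restarting the DMA */
	ret = spufs_handle_class1(ctx);
	if (ret)
		return ret;

	/* surface fatal errors; returns -EIO when any were pending */
	return spufs_handle_class0(ctx);
}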