#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/poll.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"
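
/*
 * Backing ops operate on the saved context image (csa) rather than
 * on live SPU hardware.  Reads and writes to the problem and priv2
 * registers therefore have to emulate the side effects themselves:
 * generating SPU events, updating channel counts, etc.
 */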
/* Mark an event pending in the saved event-status channel (0) and,
 * if it is enabled in the event mask (channel 1) and not already
 * pending, make the channel readable.
 */
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;

	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}
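
/*
 * Read one word from the saved PU mailbox, emulating the status
 * register update and event generation a hardware read would cause.
 */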
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of pu_mb_R is currently 1.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}
static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
					       unsigned int events)
{
	int ret;
	u32 stat;

	ret = 0;
	spin_lock_irq(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;

	/* If the requested event is there, return the poll
	   mask, otherwise enable the interrupt to get notified,
	   but first mark any pending interrupts as done so
	   we don't get woken up unnecessarily. */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_INTR;
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_THRESHOLD_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		}
	}
	spin_unlock_irq(&ctx->csa.register_lock);
	return ret;
}
static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of puint_mb_R is currently 1.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}
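
/*
 * Write one word into the saved SPU mailbox, updating the free-slot
 * count kept in mb_stat_R and the channel count for channel 29.
 */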
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/* We have space to write wbox_data.
		 * Implementation note: the depth
		 * of spu_mb_W is currently 4.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		ctx->csa.priv1.int_mask_class2_RW |=
			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		/* OR mode: accumulate into the signal register */
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		/* overwrite mode */
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}
static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		/* OR mode: accumulate into the signal register */
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		/* overwrite mode */
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}
static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}
static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}
static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
	ctx->csa.priv2.spu_privcntl_RW = val;
}
static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		/* clear all stop conditions and mark the context running */
		ctx->csa.prob.spu_status_R &=
			~SPU_STATUS_STOPPED_BY_STOP &
			~SPU_STATUS_STOPPED_BY_HALT &
			~SPU_STATUS_SINGLE_STEP &
			~SPU_STATUS_INVALID_INSTR &
			~SPU_STATUS_INVALID_CH;
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}
static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}
static void spu_backing_master_start(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}
static void spu_backing_master_stop(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}
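
/*
 * Set up an MFC tag-group query against the saved DMA status.
 * Contexts issue MFC commands while runnable, so only the tag
 * status needs to be emulated here.
 */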
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
				     u32 mode)
{
	struct spu_problem_collapsed *prob = &ctx->csa.prob;
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	if (prob->dma_querytype_RW)
		goto out;
	ret = 0;
	prob->dma_querymask_RW = mask;
	prob->dma_querytype_RW = mode;
	ctx->csa.prob.dma_tagstatus_R &= mask;
out:
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}
static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}
static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/* FIXME: set up priv2->puq */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}
static void spu_backing_restart_dma(struct spu_context *ctx)
{
	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}
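
/*
 * Backing implementation of the spu_context_ops interface, used while
 * a context is saved to memory rather than loaded on a physical SPU.
 */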
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.privcntl_write = spu_backing_privcntl_write,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};