Linux Kernel 3.7.1
hw_ops.c
/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        u32 mbox_stat;
        int ret = 0;

        spin_lock_irq(&spu->register_lock);
        mbox_stat = in_be32(&prob->mb_stat_R);
        if (mbox_stat & 0x0000ff) {
                *data = in_be32(&prob->pu_mb_R);
                ret = 4;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}
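
/*
 * For reference, the mb_stat_R masks tested in this file decode into
 * per-mailbox entry counts, one byte each.  The helpers below are a
 * minimal sketch, not part of the kernel source; the names are
 * hypothetical, but the masks mirror the ones used by
 * spu_hw_mbox_read(), spu_hw_wbox_write() and spu_hw_ibox_read().
 */
static inline unsigned int example_mbox_out_count(u32 mbox_stat)
{
        /* bits 0-7: words waiting in the SPU outbound mailbox (pu_mb_R) */
        return mbox_stat & 0x0000ff;
}

static inline unsigned int example_wbox_free_space(u32 mbox_stat)
{
        /* bits 8-15: free slots in the SPU inbound mailbox (spu_mb_W) */
        return (mbox_stat >> 8) & 0xff;
}

static inline unsigned int example_ibox_count(u32 mbox_stat)
{
        /* bits 16-23: words waiting in the outbound interrupt mailbox */
        return (mbox_stat >> 16) & 0xff;
}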

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->mb_stat_R);
}

static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
                                          unsigned int events)
{
        struct spu *spu = ctx->spu;
        int ret = 0;
        u32 stat;

        spin_lock_irq(&spu->register_lock);
        stat = in_be32(&spu->problem->mb_stat_R);

        /* if the requested event is there, return the poll
           mask, otherwise enable the interrupt to get notified,
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */

        if (events & (POLLIN | POLLRDNORM)) {
                if (stat & 0xff0000)
                        ret |= POLLIN | POLLRDNORM;
                else {
                        spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
                        spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                }
        }
        if (events & (POLLOUT | POLLWRNORM)) {
                if (stat & 0x00ff00)
                        ret |= POLLOUT | POLLWRNORM;
                else {
                        spu_int_stat_clear(spu, 2,
                                        CLASS2_MAILBOX_THRESHOLD_INTR);
                        spu_int_mask_or(spu, 2,
                                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                }
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}
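
/*
 * A minimal sketch (not part of this file) of how a poll method could
 * combine poll_wait() with spu_hw_mbox_stat_poll().  The function and
 * parameter names are hypothetical and do not claim to match the
 * actual spufs file operations.
 */
static unsigned int example_mbox_poll(struct file *file,
                                      wait_queue_head_t *wq,
                                      poll_table *wait,
                                      struct spu_context *ctx)
{
        /* register for a wakeup first, then report what is ready now */
        poll_wait(file, wq, wait);

        /* POLLIN/POLLRDNORM: interrupt mailbox has data to read;
           POLLOUT/POLLWRNORM: inbound mailbox has room to write */
        return spu_hw_mbox_stat_poll(ctx, POLLIN | POLLRDNORM |
                                          POLLOUT | POLLWRNORM);
}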

static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0xff0000) {
                /* read the first available word */
                *data = in_be64(&priv2->puint_mb_R);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt */
                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
                /* we have space to write wbox_data to */
                out_be32(&prob->spu_mb_W, data);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt when space
                   becomes available */
                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}
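
/*
 * spu_hw_mbox_read(), spu_hw_ibox_read() and spu_hw_wbox_write() all
 * return 4 when one 32-bit word was transferred and 0 when the
 * operation would block (the interrupt enabled above then wakes the
 * sleeper).  A hypothetical non-blocking caller, shown only to
 * illustrate that contract:
 */
static int example_try_write(struct spu_context *ctx, u32 word)
{
        int ret = spu_hw_wbox_write(ctx, word);

        if (ret == 0)           /* mailbox full: wait for the threshold
                                   interrupt and retry later */
                return -EAGAIN;
        return 0;               /* ret == 4: word was queued */
}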

static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify2, data);
}

static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 1;
        else
                tmp &= ~1;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 2;
        else
                tmp &= ~2;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}
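
/*
 * The four functions above toggle and read bits 0 and 1 of spu_cfg_RW
 * (signal notification 1 and 2 respectively).  A sketch of a combined
 * helper with a hypothetical name, mirroring the code above; not part
 * of the kernel source:
 */
static void example_signal_type_set(struct spu_context *ctx, int sig, u64 val)
{
        struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;
        u64 mask = (sig == 1) ? 1 : 2;
        u64 tmp;

        spin_lock_irq(&ctx->spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= mask;
        else
                tmp &= ~mask;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&ctx->spu->register_lock);
}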

static u32 spu_hw_npc_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
        out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
        return ctx->spu->local_store;
}

static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
{
        out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}

static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
        spin_lock_irq(&ctx->spu->register_lock);
        if (val & SPU_RUNCNTL_ISOLATE)
                spu_hw_privcntl_write(ctx,
                        SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
        spin_unlock_irq(&ctx->spu->register_lock);
}

static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
        while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
                cpu_relax();
        spin_unlock_irq(&ctx->spu->register_lock);
}

static void spu_hw_master_start(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
        struct spu_problem __iomem *prob = ctx->spu->problem;
        int ret;

        spin_lock_irq(&ctx->spu->register_lock);
        ret = -EAGAIN;
        if (in_be32(&prob->dma_querytype_RW))
                goto out;
        ret = 0;
        out_be32(&prob->dma_querymask_RW, mask);
        out_be32(&prob->dma_querytype_RW, mode);
out:
        spin_unlock_irq(&ctx->spu->register_lock);
        return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}
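
/*
 * spu_hw_set_mfc_query() and spu_hw_read_mfc_tagstatus() are typically
 * used together: arm a proxy tag-group query, then watch the tag
 * status register for the groups of interest.  A busy-waiting sketch
 * with hypothetical names; the 'mode' value is written verbatim to
 * dma_querytype_RW, so its encoding is left to the caller.
 */
static int example_wait_for_tag_group(struct spu_context *ctx,
                                      u32 mask, u32 mode)
{
        int ret = spu_hw_set_mfc_query(ctx, mask, mode);

        if (ret)                /* -EAGAIN: a query is already pending */
                return ret;

        while (!(spu_hw_read_mfc_tagstatus(ctx) & mask))
                cpu_relax();

        return 0;
}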

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

static int spu_hw_send_mfc_command(struct spu_context *ctx,
                                   struct mfc_dma_command *cmd)
{
        u32 status;
        struct spu_problem __iomem *prob = ctx->spu->problem;

        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&prob->mfc_lsa_W, cmd->lsa);
        out_be64(&prob->mfc_ea_W, cmd->ea);
        out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
                 cmd->size << 16 | cmd->tag);
        out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
                 cmd->class << 16 | cmd->cmd);
        status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
        spin_unlock_irq(&ctx->spu->register_lock);

        switch (status & 0xffff) {
        case 0:
                return 0;
        case 2:
                return -EAGAIN;
        default:
                return -EINVAL;
        }
}
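
/*
 * Sketch of how a caller could fill struct mfc_dma_command for the
 * method above.  The field names follow their use in
 * spu_hw_send_mfc_command(); MFC_GET_CMD (a main-storage-to-local-store
 * transfer opcode) is assumed to be provided by <asm/spu.h>.  Not part
 * of this file.
 */
static int example_enqueue_get(struct spu_context *ctx, u32 ls_addr,
                               u64 ea, u16 bytes, u16 tag)
{
        struct mfc_dma_command cmd = {
                .lsa   = ls_addr,       /* local store address */
                .ea    = ea,            /* effective address */
                .size  = bytes,         /* transfer size in bytes */
                .tag   = tag,           /* tag group for completion */
                .class = 0,             /* transfer class id */
                .cmd   = MFC_GET_CMD,   /* assumed opcode constant */
        };

        return spu_hw_send_mfc_command(ctx, &cmd);
}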

static void spu_hw_restart_dma(struct spu_context *ctx)
{
        struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

struct spu_context_ops spu_hw_ops = {
        .mbox_read = spu_hw_mbox_read,
        .mbox_stat_read = spu_hw_mbox_stat_read,
        .mbox_stat_poll = spu_hw_mbox_stat_poll,
        .ibox_read = spu_hw_ibox_read,
        .wbox_write = spu_hw_wbox_write,
        .signal1_write = spu_hw_signal1_write,
        .signal2_write = spu_hw_signal2_write,
        .signal1_type_set = spu_hw_signal1_type_set,
        .signal1_type_get = spu_hw_signal1_type_get,
        .signal2_type_set = spu_hw_signal2_type_set,
        .signal2_type_get = spu_hw_signal2_type_get,
        .npc_read = spu_hw_npc_read,
        .npc_write = spu_hw_npc_write,
        .status_read = spu_hw_status_read,
        .get_ls = spu_hw_get_ls,
        .privcntl_write = spu_hw_privcntl_write,
        .runcntl_read = spu_hw_runcntl_read,
        .runcntl_write = spu_hw_runcntl_write,
        .runcntl_stop = spu_hw_runcntl_stop,
        .master_start = spu_hw_master_start,
        .master_stop = spu_hw_master_stop,
        .set_mfc_query = spu_hw_set_mfc_query,
        .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
        .send_mfc_command = spu_hw_send_mfc_command,
        .restart_dma = spu_hw_restart_dma,
};
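
/*
 * Callers in spufs reach these methods through the ops pointer in
 * struct spu_context, so the same call sites also work with the
 * saved-state (backing store) implementation of spu_context_ops.
 * A minimal, hypothetical call site:
 */
static int example_read_through_ops(struct spu_context *ctx, u32 *data)
{
        /* returns 4 if a mailbox word was read, 0 if the mailbox is empty */
        return ctx->ops->mbox_read(ctx, data);
}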