Linux Kernel 3.7.1
file.c
1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <[email protected]>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #undef DEBUG
24 
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/export.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h>
32 #include <linux/slab.h>
33 
34 #include <asm/io.h>
35 #include <asm/time.h>
36 #include <asm/spu.h>
37 #include <asm/spu_info.h>
38 #include <asm/uaccess.h>
39 
40 #include "spufs.h"
41 #include "sputrace.h"
42 
43 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
44 
45 /* Simple attribute files */
46 struct spufs_attr {
47  int (*get)(void *, u64 *);
48  int (*set)(void *, u64);
49  char get_buf[24]; /* enough to store a u64 and "\n\0" */
50  char set_buf[24];
51  void *data;
52  const char *fmt; /* format for read operation */
53  struct mutex mutex; /* protects access to these buffers */
54 };
55 
56 static int spufs_attr_open(struct inode *inode, struct file *file,
57  int (*get)(void *, u64 *), int (*set)(void *, u64),
58  const char *fmt)
59 {
60  struct spufs_attr *attr;
61 
62  attr = kmalloc(sizeof(*attr), GFP_KERNEL);
63  if (!attr)
64  return -ENOMEM;
65 
66  attr->get = get;
67  attr->set = set;
68  attr->data = inode->i_private;
69  attr->fmt = fmt;
70  mutex_init(&attr->mutex);
71  file->private_data = attr;
72 
73  return nonseekable_open(inode, file);
74 }
75 
76 static int spufs_attr_release(struct inode *inode, struct file *file)
77 {
78  kfree(file->private_data);
79  return 0;
80 }
81 
82 static ssize_t spufs_attr_read(struct file *file, char __user *buf,
83  size_t len, loff_t *ppos)
84 {
85  struct spufs_attr *attr;
86  size_t size;
87  ssize_t ret;
88 
89  attr = file->private_data;
90  if (!attr->get)
91  return -EACCES;
92 
93  ret = mutex_lock_interruptible(&attr->mutex);
94  if (ret)
95  return ret;
96 
97  if (*ppos) { /* continued read */
98  size = strlen(attr->get_buf);
99  } else { /* first read */
100  u64 val;
101  ret = attr->get(attr->data, &val);
102  if (ret)
103  goto out;
104 
105  size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
106  attr->fmt, (unsigned long long)val);
107  }
108 
109  ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
110 out:
111  mutex_unlock(&attr->mutex);
112  return ret;
113 }
114 
115 static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
116  size_t len, loff_t *ppos)
117 {
118  struct spufs_attr *attr;
119  u64 val;
120  size_t size;
121  ssize_t ret;
122 
123  attr = file->private_data;
124  if (!attr->set)
125  return -EACCES;
126 
127  ret = mutex_lock_interruptible(&attr->mutex);
128  if (ret)
129  return ret;
130 
131  ret = -EFAULT;
132  size = min(sizeof(attr->set_buf) - 1, len);
133  if (copy_from_user(attr->set_buf, buf, size))
134  goto out;
135 
136  ret = len; /* claim we got the whole input */
137  attr->set_buf[size] = '\0';
138  val = simple_strtol(attr->set_buf, NULL, 0);
139  attr->set(attr->data, val);
140 out:
141  mutex_unlock(&attr->mutex);
142  return ret;
143 }
144 
145 #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
146 static int __fops ## _open(struct inode *inode, struct file *file) \
147 { \
148  __simple_attr_check_format(__fmt, 0ull); \
149  return spufs_attr_open(inode, file, __get, __set, __fmt); \
150 } \
151 static const struct file_operations __fops = { \
152  .owner = THIS_MODULE, \
153  .open = __fops ## _open, \
154  .release = spufs_attr_release, \
155  .read = spufs_attr_read, \
156  .write = spufs_attr_write, \
157  .llseek = generic_file_llseek, \
158 };
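/*
 * Illustrative sketch, not part of the original source: a definition like
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      spufs_foo_set, "0x%llx\n");
 *
 * generates spufs_foo_ops_open() plus a read/write file_operations whose
 * I/O paths run through spufs_attr_read()/spufs_attr_write() above. The
 * spufs_foo_* names are hypothetical; the real users in this file go
 * through the DEFINE_SPUFS_ATTRIBUTE wrapper defined further down.
 */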
159 
160 
161 static int
162 spufs_mem_open(struct inode *inode, struct file *file)
163 {
164  struct spufs_inode_info *i = SPUFS_I(inode);
165  struct spu_context *ctx = i->i_ctx;
166 
167  mutex_lock(&ctx->mapping_lock);
168  file->private_data = ctx;
169  if (!i->i_openers++)
170  ctx->local_store = inode->i_mapping;
171  mutex_unlock(&ctx->mapping_lock);
172  return 0;
173 }
174 
175 static int
176 spufs_mem_release(struct inode *inode, struct file *file)
177 {
178  struct spufs_inode_info *i = SPUFS_I(inode);
179  struct spu_context *ctx = i->i_ctx;
180 
181  mutex_lock(&ctx->mapping_lock);
182  if (!--i->i_openers)
183  ctx->local_store = NULL;
184  mutex_unlock(&ctx->mapping_lock);
185  return 0;
186 }
187 
188 static ssize_t
189 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
190  size_t size, loff_t *pos)
191 {
192  char *local_store = ctx->ops->get_ls(ctx);
193  return simple_read_from_buffer(buffer, size, pos, local_store,
194  LS_SIZE);
195 }
196 
197 static ssize_t
198 spufs_mem_read(struct file *file, char __user *buffer,
199  size_t size, loff_t *pos)
200 {
201  struct spu_context *ctx = file->private_data;
202  ssize_t ret;
203 
204  ret = spu_acquire(ctx);
205  if (ret)
206  return ret;
207  ret = __spufs_mem_read(ctx, buffer, size, pos);
208  spu_release(ctx);
209 
210  return ret;
211 }
212 
213 static ssize_t
214 spufs_mem_write(struct file *file, const char __user *buffer,
215  size_t size, loff_t *ppos)
216 {
217  struct spu_context *ctx = file->private_data;
218  char *local_store;
219  loff_t pos = *ppos;
220  int ret;
221 
222  if (pos > LS_SIZE)
223  return -EFBIG;
224 
225  ret = spu_acquire(ctx);
226  if (ret)
227  return ret;
228 
229  local_store = ctx->ops->get_ls(ctx);
230  size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
231  spu_release(ctx);
232 
233  return size;
234 }
235 
236 static int
237 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
238 {
239  struct spu_context *ctx = vma->vm_file->private_data;
240  unsigned long address = (unsigned long)vmf->virtual_address;
241  unsigned long pfn, offset;
242 
243 #ifdef CONFIG_SPU_FS_64K_LS
244  struct spu_state *csa = &ctx->csa;
245  int psize;
246 
247  /* Check what page size we are using */
248  psize = get_slice_psize(vma->vm_mm, address);
249 
250  /* Some sanity checking */
251  BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
252 
253  /* Wow, 64K, cool, we need to align the address though */
254  if (csa->use_big_pages) {
255  BUG_ON(vma->vm_start & 0xffff);
256  address &= ~0xfffful;
257  }
258 #endif /* CONFIG_SPU_FS_64K_LS */
259 
260  offset = vmf->pgoff << PAGE_SHIFT;
261  if (offset >= LS_SIZE)
262  return VM_FAULT_SIGBUS;
263 
264  pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
265  address, offset);
266 
267  if (spu_acquire(ctx))
268  return VM_FAULT_NOPAGE;
269 
270  if (ctx->state == SPU_STATE_SAVED) {
271  vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
272  pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
273  } else {
274  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
275  pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
276  }
277  vm_insert_pfn(vma, address, pfn);
278 
279  spu_release(ctx);
280 
281  return VM_FAULT_NOPAGE;
282 }
283 
284 static int spufs_mem_mmap_access(struct vm_area_struct *vma,
285  unsigned long address,
286  void *buf, int len, int write)
287 {
288  struct spu_context *ctx = vma->vm_file->private_data;
289  unsigned long offset = address - vma->vm_start;
290  char *local_store;
291 
292  if (write && !(vma->vm_flags & VM_WRITE))
293  return -EACCES;
294  if (spu_acquire(ctx))
295  return -EINTR;
296  if ((offset + len) > vma->vm_end)
297  len = vma->vm_end - offset;
298  local_store = ctx->ops->get_ls(ctx);
299  if (write)
300  memcpy_toio(local_store + offset, buf, len);
301  else
302  memcpy_fromio(buf, local_store + offset, len);
303  spu_release(ctx);
304  return len;
305 }
306 
307 static const struct vm_operations_struct spufs_mem_mmap_vmops = {
308  .fault = spufs_mem_mmap_fault,
309  .access = spufs_mem_mmap_access,
310 };
311 
312 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
313 {
314 #ifdef CONFIG_SPU_FS_64K_LS
315  struct spu_context *ctx = file->private_data;
316  struct spu_state *csa = &ctx->csa;
317 
318  /* Sanity check VMA alignment */
319  if (csa->use_big_pages) {
320  pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
321  " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
322  vma->vm_pgoff);
323  if (vma->vm_start & 0xffff)
324  return -EINVAL;
325  if (vma->vm_pgoff & 0xf)
326  return -EINVAL;
327  }
328 #endif /* CONFIG_SPU_FS_64K_LS */
329 
330  if (!(vma->vm_flags & VM_SHARED))
331  return -EINVAL;
332 
333  vma->vm_flags |= VM_IO | VM_PFNMAP;
334  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
335 
336  vma->vm_ops = &spufs_mem_mmap_vmops;
337  return 0;
338 }
339 
340 #ifdef CONFIG_SPU_FS_64K_LS
341 static unsigned long spufs_get_unmapped_area(struct file *file,
342  unsigned long addr, unsigned long len, unsigned long pgoff,
343  unsigned long flags)
344 {
345  struct spu_context *ctx = file->private_data;
346  struct spu_state *csa = &ctx->csa;
347 
348  /* If not using big pages, fall back to the normal mm get_unmapped_area */
349  if (!csa->use_big_pages)
350  return current->mm->get_unmapped_area(file, addr, len,
351  pgoff, flags);
352 
353  /* Else, try to obtain a 64K pages slice */
354  return slice_get_unmapped_area(addr, len, flags,
355  MMU_PAGE_64K, 1, 0);
356 }
357 #endif /* CONFIG_SPU_FS_64K_LS */
358 
359 static const struct file_operations spufs_mem_fops = {
360  .open = spufs_mem_open,
361  .release = spufs_mem_release,
362  .read = spufs_mem_read,
363  .write = spufs_mem_write,
364  .llseek = generic_file_llseek,
365  .mmap = spufs_mem_mmap,
366 #ifdef CONFIG_SPU_FS_64K_LS
367  .get_unmapped_area = spufs_get_unmapped_area,
368 #endif
369 };
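/*
 * Illustrative sketch, not part of the original source: user space maps
 * the local store through this file roughly as
 *
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, mem_fd, 0);
 *
 * MAP_SHARED is mandatory, since spufs_mem_mmap() rejects mappings
 * without VM_SHARED; mem_fd stands for a hypothetical open "mem" file.
 */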
370 
371 static int spufs_ps_fault(struct vm_area_struct *vma,
372  struct vm_fault *vmf,
373  unsigned long ps_offs,
374  unsigned long ps_size)
375 {
376  struct spu_context *ctx = vma->vm_file->private_data;
377  unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
378  int ret = 0;
379 
380  spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
381 
382  if (offset >= ps_size)
383  return VM_FAULT_SIGBUS;
384 
385  if (fatal_signal_pending(current))
386  return VM_FAULT_SIGBUS;
387 
388  /*
389  * Because we release the mmap_sem, the context may be destroyed while
390  * we're in spu_wait. Grab an extra reference so it isn't destroyed
391  * in the meantime.
392  */
393  get_spu_context(ctx);
394 
395  /*
396  * We have to wait for context to be loaded before we have
397  * pages to hand out to the user, but we don't want to wait
398  * with the mmap_sem held.
399  * It is possible to drop the mmap_sem here, but then we need
400  * to return VM_FAULT_NOPAGE because the mappings may have
401  * changed.
402  */
403  if (spu_acquire(ctx))
404  goto refault;
405 
406  if (ctx->state == SPU_STATE_SAVED) {
407  up_read(&current->mm->mmap_sem);
408  spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
409  ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
410  spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
411  down_read(&current->mm->mmap_sem);
412  } else {
413  area = ctx->spu->problem_phys + ps_offs;
414  vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
415  (area + offset) >> PAGE_SHIFT);
416  spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
417  }
418 
419  if (!ret)
420  spu_release(ctx);
421 
422 refault:
423  put_spu_context(ctx);
424  return VM_FAULT_NOPAGE;
425 }
426 
427 #if SPUFS_MMAP_4K
428 static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
429  struct vm_fault *vmf)
430 {
431  return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
432 }
433 
434 static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
435  .fault = spufs_cntl_mmap_fault,
436 };
437 
438 /*
439  * mmap support for problem state control area [0x4000 - 0x4fff].
440  */
441 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
442 {
443  if (!(vma->vm_flags & VM_SHARED))
444  return -EINVAL;
445 
446  vma->vm_flags |= VM_IO | VM_PFNMAP;
447  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
448 
449  vma->vm_ops = &spufs_cntl_mmap_vmops;
450  return 0;
451 }
452 #else /* SPUFS_MMAP_4K */
453 #define spufs_cntl_mmap NULL
454 #endif /* !SPUFS_MMAP_4K */
455 
456 static int spufs_cntl_get(void *data, u64 *val)
457 {
458  struct spu_context *ctx = data;
459  int ret;
460 
461  ret = spu_acquire(ctx);
462  if (ret)
463  return ret;
464  *val = ctx->ops->status_read(ctx);
465  spu_release(ctx);
466 
467  return 0;
468 }
469 
470 static int spufs_cntl_set(void *data, u64 val)
471 {
472  struct spu_context *ctx = data;
473  int ret;
474 
475  ret = spu_acquire(ctx);
476  if (ret)
477  return ret;
478  ctx->ops->runcntl_write(ctx, val);
479  spu_release(ctx);
480 
481  return 0;
482 }
483 
484 static int spufs_cntl_open(struct inode *inode, struct file *file)
485 {
486  struct spufs_inode_info *i = SPUFS_I(inode);
487  struct spu_context *ctx = i->i_ctx;
488 
489  mutex_lock(&ctx->mapping_lock);
490  file->private_data = ctx;
491  if (!i->i_openers++)
492  ctx->cntl = inode->i_mapping;
493  mutex_unlock(&ctx->mapping_lock);
494  return simple_attr_open(inode, file, spufs_cntl_get,
495  spufs_cntl_set, "0x%08lx");
496 }
497 
498 static int
499 spufs_cntl_release(struct inode *inode, struct file *file)
500 {
501  struct spufs_inode_info *i = SPUFS_I(inode);
502  struct spu_context *ctx = i->i_ctx;
503 
504  simple_attr_release(inode, file);
505 
506  mutex_lock(&ctx->mapping_lock);
507  if (!--i->i_openers)
508  ctx->cntl = NULL;
509  mutex_unlock(&ctx->mapping_lock);
510  return 0;
511 }
512 
513 static const struct file_operations spufs_cntl_fops = {
514  .open = spufs_cntl_open,
515  .release = spufs_cntl_release,
516  .read = simple_attr_read,
517  .write = simple_attr_write,
518  .llseek = generic_file_llseek,
519  .mmap = spufs_cntl_mmap,
520 };
521 
522 static int
523 spufs_regs_open(struct inode *inode, struct file *file)
524 {
525  struct spufs_inode_info *i = SPUFS_I(inode);
526  file->private_data = i->i_ctx;
527  return 0;
528 }
529 
530 static ssize_t
531 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
532  size_t size, loff_t *pos)
533 {
534  struct spu_lscsa *lscsa = ctx->csa.lscsa;
535  return simple_read_from_buffer(buffer, size, pos,
536  lscsa->gprs, sizeof lscsa->gprs);
537 }
538 
539 static ssize_t
540 spufs_regs_read(struct file *file, char __user *buffer,
541  size_t size, loff_t *pos)
542 {
543  int ret;
544  struct spu_context *ctx = file->private_data;
545 
546  /* pre-check for file position: if we'd return EOF, there's no point
547  * causing a deschedule */
548  if (*pos >= sizeof(ctx->csa.lscsa->gprs))
549  return 0;
550 
551  ret = spu_acquire_saved(ctx);
552  if (ret)
553  return ret;
554  ret = __spufs_regs_read(ctx, buffer, size, pos);
555  spu_release_saved(ctx);
556  return ret;
557 }
558 
559 static ssize_t
560 spufs_regs_write(struct file *file, const char __user *buffer,
561  size_t size, loff_t *pos)
562 {
563  struct spu_context *ctx = file->private_data;
564  struct spu_lscsa *lscsa = ctx->csa.lscsa;
565  int ret;
566 
567  if (*pos >= sizeof(lscsa->gprs))
568  return -EFBIG;
569 
570  ret = spu_acquire_saved(ctx);
571  if (ret)
572  return ret;
573 
574  size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
575  buffer, size);
576 
577  spu_release_saved(ctx);
578  return size;
579 }
580 
581 static const struct file_operations spufs_regs_fops = {
582  .open = spufs_regs_open,
583  .read = spufs_regs_read,
584  .write = spufs_regs_write,
585  .llseek = generic_file_llseek,
586 };
587 
588 static ssize_t
589 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
590  size_t size, loff_t * pos)
591 {
592  struct spu_lscsa *lscsa = ctx->csa.lscsa;
593  return simple_read_from_buffer(buffer, size, pos,
594  &lscsa->fpcr, sizeof(lscsa->fpcr));
595 }
596 
597 static ssize_t
598 spufs_fpcr_read(struct file *file, char __user * buffer,
599  size_t size, loff_t * pos)
600 {
601  int ret;
602  struct spu_context *ctx = file->private_data;
603 
604  ret = spu_acquire_saved(ctx);
605  if (ret)
606  return ret;
607  ret = __spufs_fpcr_read(ctx, buffer, size, pos);
608  spu_release_saved(ctx);
609  return ret;
610 }
611 
612 static ssize_t
613 spufs_fpcr_write(struct file *file, const char __user * buffer,
614  size_t size, loff_t * pos)
615 {
616  struct spu_context *ctx = file->private_data;
617  struct spu_lscsa *lscsa = ctx->csa.lscsa;
618  int ret;
619 
620  if (*pos >= sizeof(lscsa->fpcr))
621  return -EFBIG;
622 
623  ret = spu_acquire_saved(ctx);
624  if (ret)
625  return ret;
626 
627  size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
628  buffer, size);
629 
630  spu_release_saved(ctx);
631  return size;
632 }
633 
634 static const struct file_operations spufs_fpcr_fops = {
635  .open = spufs_regs_open,
636  .read = spufs_fpcr_read,
637  .write = spufs_fpcr_write,
638  .llseek = generic_file_llseek,
639 };
640 
641 /* generic open function for all pipe-like files */
642 static int spufs_pipe_open(struct inode *inode, struct file *file)
643 {
644  struct spufs_inode_info *i = SPUFS_I(inode);
645  file->private_data = i->i_ctx;
646 
647  return nonseekable_open(inode, file);
648 }
649 
650 /*
651  * Read as many bytes from the mailbox as possible, until
652  * one of the conditions becomes true:
653  *
654  * - no more data available in the mailbox
655  * - end of the user provided buffer
656  * - end of the mapped area
657  */
658 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
659  size_t len, loff_t *pos)
660 {
661  struct spu_context *ctx = file->private_data;
662  u32 mbox_data, __user *udata;
663  ssize_t count;
664 
665  if (len < 4)
666  return -EINVAL;
667 
668  if (!access_ok(VERIFY_WRITE, buf, len))
669  return -EFAULT;
670 
671  udata = (void __user *)buf;
672 
673  count = spu_acquire(ctx);
674  if (count)
675  return count;
676 
677  for (count = 0; (count + 4) <= len; count += 4, udata++) {
678  int ret;
679  ret = ctx->ops->mbox_read(ctx, &mbox_data);
680  if (ret == 0)
681  break;
682 
683  /*
684  * at the end of the mapped area, we can fault
685  * but still need to return the data we have
686  * read successfully so far.
687  */
688  ret = __put_user(mbox_data, udata);
689  if (ret) {
690  if (!count)
691  count = -EFAULT;
692  break;
693  }
694  }
695  spu_release(ctx);
696 
697  if (!count)
698  count = -EAGAIN;
699 
700  return count;
701 }
702 
703 static const struct file_operations spufs_mbox_fops = {
704  .open = spufs_pipe_open,
705  .read = spufs_mbox_read,
706  .llseek = no_llseek,
707 };
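/*
 * Illustrative sketch, not part of the original source: reads from this
 * file never block, so callers poll or retry, e.g.
 *
 *	u32 data;
 *	ssize_t n = read(mbox_fd, &data, sizeof(data));
 *
 * where n is 4 on success and -1 with errno set to EAGAIN on an empty
 * mailbox; mbox_fd stands for a hypothetical open "mbox" file.
 */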
708 
709 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
710  size_t len, loff_t *pos)
711 {
712  struct spu_context *ctx = file->private_data;
713  ssize_t ret;
714  u32 mbox_stat;
715 
716  if (len < 4)
717  return -EINVAL;
718 
719  ret = spu_acquire(ctx);
720  if (ret)
721  return ret;
722 
723  mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
724 
725  spu_release(ctx);
726 
727  if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
728  return -EFAULT;
729 
730  return 4;
731 }
732 
733 static const struct file_operations spufs_mbox_stat_fops = {
734  .open = spufs_pipe_open,
735  .read = spufs_mbox_stat_read,
736  .llseek = no_llseek,
737 };
738 
739 /* low-level ibox access function */
740 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
741 {
742  return ctx->ops->ibox_read(ctx, data);
743 }
744 
745 static int spufs_ibox_fasync(int fd, struct file *file, int on)
746 {
747  struct spu_context *ctx = file->private_data;
748 
749  return fasync_helper(fd, file, on, &ctx->ibox_fasync);
750 }
751 
752 /* interrupt-level ibox callback function. */
753 void spufs_ibox_callback(struct spu *spu)
754 {
755  struct spu_context *ctx = spu->ctx;
756 
757  if (!ctx)
758  return;
759 
760  wake_up_all(&ctx->ibox_wq);
761  kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
762 }
763 
764 /*
765  * Read as many bytes from the interrupt mailbox as possible, until
766  * one of the conditions becomes true:
767  *
768  * - no more data available in the mailbox
769  * - end of the user provided buffer
770  * - end of the mapped area
771  *
772  * If the file is opened without O_NONBLOCK, we wait here until
773  * any data is available, but return when we have been able to
774  * read something.
775  */
776 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
777  size_t len, loff_t *pos)
778 {
779  struct spu_context *ctx = file->private_data;
780  u32 ibox_data, __user *udata;
781  ssize_t count;
782 
783  if (len < 4)
784  return -EINVAL;
785 
786  if (!access_ok(VERIFY_WRITE, buf, len))
787  return -EFAULT;
788 
789  udata = (void __user *)buf;
790 
791  count = spu_acquire(ctx);
792  if (count)
793  goto out;
794 
795  /* wait only for the first element */
796  count = 0;
797  if (file->f_flags & O_NONBLOCK) {
798  if (!spu_ibox_read(ctx, &ibox_data)) {
799  count = -EAGAIN;
800  goto out_unlock;
801  }
802  } else {
803  count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
804  if (count)
805  goto out;
806  }
807 
808  /* if we can't write at all, return -EFAULT */
809  count = __put_user(ibox_data, udata);
810  if (count)
811  goto out_unlock;
812 
813  for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
814  int ret;
815  ret = ctx->ops->ibox_read(ctx, &ibox_data);
816  if (ret == 0)
817  break;
818  /*
819  * at the end of the mapped area, we can fault
820  * but still need to return the data we have
821  * read successfully so far.
822  */
823  ret = __put_user(ibox_data, udata);
824  if (ret)
825  break;
826  }
827 
828 out_unlock:
829  spu_release(ctx);
830 out:
831  return count;
832 }
833 
834 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
835 {
836  struct spu_context *ctx = file->private_data;
837  unsigned int mask;
838 
839  poll_wait(file, &ctx->ibox_wq, wait);
840 
841  /*
842  * For now keep this uninterruptible and also ignore the rule
843  * that poll should not sleep. Will be fixed later.
844  */
845  mutex_lock(&ctx->state_mutex);
846  mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
847  spu_release(ctx);
848 
849  return mask;
850 }
851 
852 static const struct file_operations spufs_ibox_fops = {
853  .open = spufs_pipe_open,
854  .read = spufs_ibox_read,
855  .poll = spufs_ibox_poll,
856  .fasync = spufs_ibox_fasync,
857  .llseek = no_llseek,
858 };
859 
860 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
861  size_t len, loff_t *pos)
862 {
863  struct spu_context *ctx = file->private_data;
864  ssize_t ret;
865  u32 ibox_stat;
866 
867  if (len < 4)
868  return -EINVAL;
869 
870  ret = spu_acquire(ctx);
871  if (ret)
872  return ret;
873  ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
874  spu_release(ctx);
875 
876  if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
877  return -EFAULT;
878 
879  return 4;
880 }
881 
882 static const struct file_operations spufs_ibox_stat_fops = {
883  .open = spufs_pipe_open,
884  .read = spufs_ibox_stat_read,
885  .llseek = no_llseek,
886 };
887 
888 /* low-level mailbox write */
889 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
890 {
891  return ctx->ops->wbox_write(ctx, data);
892 }
893 
894 static int spufs_wbox_fasync(int fd, struct file *file, int on)
895 {
896  struct spu_context *ctx = file->private_data;
897  int ret;
898 
899  ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
900 
901  return ret;
902 }
903 
904 /* interrupt-level wbox callback function. */
905 void spufs_wbox_callback(struct spu *spu)
906 {
907  struct spu_context *ctx = spu->ctx;
908 
909  if (!ctx)
910  return;
911 
912  wake_up_all(&ctx->wbox_wq);
913  kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
914 }
915 
916 /*
917  * Write as many bytes to the interrupt mailbox as possible, until
918  * one of the conditions becomes true:
919  *
920  * - the mailbox is full
921  * - end of the user provided buffer
922  * - end of the mapped area
923  *
924  * If the file is opened without O_NONBLOCK, we wait here until
925  * space is available, but return when we have been able to
926  * write something.
927  */
928 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
929  size_t len, loff_t *pos)
930 {
931  struct spu_context *ctx = file->private_data;
932  u32 wbox_data, __user *udata;
933  ssize_t count;
934 
935  if (len < 4)
936  return -EINVAL;
937 
938  udata = (void __user *)buf;
939  if (!access_ok(VERIFY_READ, buf, len))
940  return -EFAULT;
941 
942  if (__get_user(wbox_data, udata))
943  return -EFAULT;
944 
945  count = spu_acquire(ctx);
946  if (count)
947  goto out;
948 
949  /*
950  * make sure we can at least write one element, by waiting
951  * in case of !O_NONBLOCK
952  */
953  count = 0;
954  if (file->f_flags & O_NONBLOCK) {
955  if (!spu_wbox_write(ctx, wbox_data)) {
956  count = -EAGAIN;
957  goto out_unlock;
958  }
959  } else {
960  count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
961  if (count)
962  goto out;
963  }
964 
965 
966  /* write as much as possible */
967  for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
968  int ret;
969  ret = __get_user(wbox_data, udata);
970  if (ret)
971  break;
972 
973  ret = spu_wbox_write(ctx, wbox_data);
974  if (ret == 0)
975  break;
976  }
977 
978 out_unlock:
979  spu_release(ctx);
980 out:
981  return count;
982 }
983 
984 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
985 {
986  struct spu_context *ctx = file->private_data;
987  unsigned int mask;
988 
989  poll_wait(file, &ctx->wbox_wq, wait);
990 
991  /*
992  * For now keep this uninterruptible and also ignore the rule
993  * that poll should not sleep. Will be fixed later.
994  */
995  mutex_lock(&ctx->state_mutex);
996  mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
997  spu_release(ctx);
998 
999  return mask;
1000 }
1001 
1002 static const struct file_operations spufs_wbox_fops = {
1003  .open = spufs_pipe_open,
1004  .write = spufs_wbox_write,
1005  .poll = spufs_wbox_poll,
1006  .fasync = spufs_wbox_fasync,
1007  .llseek = no_llseek,
1008 };
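/*
 * Illustrative sketch, not part of the original source: a plain write
 * blocks until at least one 32-bit word fits; with O_NONBLOCK the same
 * call fails with EAGAIN while the mailbox is full:
 *
 *	u32 data = 1;
 *	ssize_t n = write(wbox_fd, &data, sizeof(data));
 *
 * wbox_fd stands for a hypothetical open "wbox" file.
 */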
1009 
1010 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
1011  size_t len, loff_t *pos)
1012 {
1013  struct spu_context *ctx = file->private_data;
1014  ssize_t ret;
1015  u32 wbox_stat;
1016 
1017  if (len < 4)
1018  return -EINVAL;
1019 
1020  ret = spu_acquire(ctx);
1021  if (ret)
1022  return ret;
1023  wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
1024  spu_release(ctx);
1025 
1026  if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
1027  return -EFAULT;
1028 
1029  return 4;
1030 }
1031 
1032 static const struct file_operations spufs_wbox_stat_fops = {
1033  .open = spufs_pipe_open,
1034  .read = spufs_wbox_stat_read,
1035  .llseek = no_llseek,
1036 };
1037 
1038 static int spufs_signal1_open(struct inode *inode, struct file *file)
1039 {
1040  struct spufs_inode_info *i = SPUFS_I(inode);
1041  struct spu_context *ctx = i->i_ctx;
1042 
1043  mutex_lock(&ctx->mapping_lock);
1044  file->private_data = ctx;
1045  if (!i->i_openers++)
1046  ctx->signal1 = inode->i_mapping;
1047  mutex_unlock(&ctx->mapping_lock);
1048  return nonseekable_open(inode, file);
1049 }
1050 
1051 static int
1052 spufs_signal1_release(struct inode *inode, struct file *file)
1053 {
1054  struct spufs_inode_info *i = SPUFS_I(inode);
1055  struct spu_context *ctx = i->i_ctx;
1056 
1057  mutex_lock(&ctx->mapping_lock);
1058  if (!--i->i_openers)
1059  ctx->signal1 = NULL;
1060  mutex_unlock(&ctx->mapping_lock);
1061  return 0;
1062 }
1063 
1064 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
1065  size_t len, loff_t *pos)
1066 {
1067  int ret = 0;
1068  u32 data;
1069 
1070  if (len < 4)
1071  return -EINVAL;
1072 
1073  if (ctx->csa.spu_chnlcnt_RW[3]) {
1074  data = ctx->csa.spu_chnldata_RW[3];
1075  ret = 4;
1076  }
1077 
1078  if (!ret)
1079  goto out;
1080 
1081  if (copy_to_user(buf, &data, 4))
1082  return -EFAULT;
1083 
1084 out:
1085  return ret;
1086 }
1087 
1088 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1089  size_t len, loff_t *pos)
1090 {
1091  int ret;
1092  struct spu_context *ctx = file->private_data;
1093 
1094  ret = spu_acquire_saved(ctx);
1095  if (ret)
1096  return ret;
1097  ret = __spufs_signal1_read(ctx, buf, len, pos);
1098  spu_release_saved(ctx);
1099 
1100  return ret;
1101 }
1102 
1103 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1104  size_t len, loff_t *pos)
1105 {
1106  struct spu_context *ctx;
1107  ssize_t ret;
1108  u32 data;
1109 
1110  ctx = file->private_data;
1111 
1112  if (len < 4)
1113  return -EINVAL;
1114 
1115  if (copy_from_user(&data, buf, 4))
1116  return -EFAULT;
1117 
1118  ret = spu_acquire(ctx);
1119  if (ret)
1120  return ret;
1121  ctx->ops->signal1_write(ctx, data);
1122  spu_release(ctx);
1123 
1124  return 4;
1125 }
1126 
1127 static int
1128 spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1129 {
1130 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1131  return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1132 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1133  /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1134  * signal 1 and 2 area
1135  */
1136  return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1137 #else
1138 #error unsupported page size
1139 #endif
1140 }
1141 
1142 static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
1143  .fault = spufs_signal1_mmap_fault,
1144 };
1145 
1146 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1147 {
1148  if (!(vma->vm_flags & VM_SHARED))
1149  return -EINVAL;
1150 
1151  vma->vm_flags |= VM_IO | VM_PFNMAP;
1152  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
1153 
1154  vma->vm_ops = &spufs_signal1_mmap_vmops;
1155  return 0;
1156 }
1157 
1158 static const struct file_operations spufs_signal1_fops = {
1159  .open = spufs_signal1_open,
1160  .release = spufs_signal1_release,
1161  .read = spufs_signal1_read,
1162  .write = spufs_signal1_write,
1163  .mmap = spufs_signal1_mmap,
1164  .llseek = no_llseek,
1165 };
1166 
1167 static const struct file_operations spufs_signal1_nosched_fops = {
1168  .open = spufs_signal1_open,
1169  .release = spufs_signal1_release,
1170  .write = spufs_signal1_write,
1171  .mmap = spufs_signal1_mmap,
1172  .llseek = no_llseek,
1173 };
1174 
1175 static int spufs_signal2_open(struct inode *inode, struct file *file)
1176 {
1177  struct spufs_inode_info *i = SPUFS_I(inode);
1178  struct spu_context *ctx = i->i_ctx;
1179 
1180  mutex_lock(&ctx->mapping_lock);
1181  file->private_data = ctx;
1182  if (!i->i_openers++)
1183  ctx->signal2 = inode->i_mapping;
1184  mutex_unlock(&ctx->mapping_lock);
1185  return nonseekable_open(inode, file);
1186 }
1187 
1188 static int
1189 spufs_signal2_release(struct inode *inode, struct file *file)
1190 {
1191  struct spufs_inode_info *i = SPUFS_I(inode);
1192  struct spu_context *ctx = i->i_ctx;
1193 
1194  mutex_lock(&ctx->mapping_lock);
1195  if (!--i->i_openers)
1196  ctx->signal2 = NULL;
1197  mutex_unlock(&ctx->mapping_lock);
1198  return 0;
1199 }
1200 
1201 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1202  size_t len, loff_t *pos)
1203 {
1204  int ret = 0;
1205  u32 data;
1206 
1207  if (len < 4)
1208  return -EINVAL;
1209 
1210  if (ctx->csa.spu_chnlcnt_RW[4]) {
1211  data = ctx->csa.spu_chnldata_RW[4];
1212  ret = 4;
1213  }
1214 
1215  if (!ret)
1216  goto out;
1217 
1218  if (copy_to_user(buf, &data, 4))
1219  return -EFAULT;
1220 
1221 out:
1222  return ret;
1223 }
1224 
1225 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1226  size_t len, loff_t *pos)
1227 {
1228  struct spu_context *ctx = file->private_data;
1229  int ret;
1230 
1231  ret = spu_acquire_saved(ctx);
1232  if (ret)
1233  return ret;
1234  ret = __spufs_signal2_read(ctx, buf, len, pos);
1235  spu_release_saved(ctx);
1236 
1237  return ret;
1238 }
1239 
1240 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1241  size_t len, loff_t *pos)
1242 {
1243  struct spu_context *ctx;
1244  ssize_t ret;
1245  u32 data;
1246 
1247  ctx = file->private_data;
1248 
1249  if (len < 4)
1250  return -EINVAL;
1251 
1252  if (copy_from_user(&data, buf, 4))
1253  return -EFAULT;
1254 
1255  ret = spu_acquire(ctx);
1256  if (ret)
1257  return ret;
1258  ctx->ops->signal2_write(ctx, data);
1259  spu_release(ctx);
1260 
1261  return 4;
1262 }
1263 
1264 #if SPUFS_MMAP_4K
1265 static int
1266 spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1267 {
1268 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1269  return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1270 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1271  /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1272  * signal 1 and 2 area
1273  */
1274  return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1275 #else
1276 #error unsupported page size
1277 #endif
1278 }
1279 
1280 static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
1281  .fault = spufs_signal2_mmap_fault,
1282 };
1283 
1284 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1285 {
1286  if (!(vma->vm_flags & VM_SHARED))
1287  return -EINVAL;
1288 
1289  vma->vm_flags |= VM_IO | VM_PFNMAP;
1290  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
1291 
1292  vma->vm_ops = &spufs_signal2_mmap_vmops;
1293  return 0;
1294 }
1295 #else /* SPUFS_MMAP_4K */
1296 #define spufs_signal2_mmap NULL
1297 #endif /* !SPUFS_MMAP_4K */
1298 
1299 static const struct file_operations spufs_signal2_fops = {
1300  .open = spufs_signal2_open,
1301  .release = spufs_signal2_release,
1302  .read = spufs_signal2_read,
1303  .write = spufs_signal2_write,
1304  .mmap = spufs_signal2_mmap,
1305  .llseek = no_llseek,
1306 };
1307 
1308 static const struct file_operations spufs_signal2_nosched_fops = {
1309  .open = spufs_signal2_open,
1310  .release = spufs_signal2_release,
1311  .write = spufs_signal2_write,
1312  .mmap = spufs_signal2_mmap,
1313  .llseek = no_llseek,
1314 };
1315 
1316 /*
1317  * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1318  * work of acquiring (or not) the SPU context before calling through
1319  * to the actual get routine. The set routine is called directly.
1320  */
1321 #define SPU_ATTR_NOACQUIRE 0
1322 #define SPU_ATTR_ACQUIRE 1
1323 #define SPU_ATTR_ACQUIRE_SAVED 2
1324 
1325 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
1326 static int __##__get(void *data, u64 *val) \
1327 { \
1328  struct spu_context *ctx = data; \
1329  int ret = 0; \
1330  \
1331  if (__acquire == SPU_ATTR_ACQUIRE) { \
1332  ret = spu_acquire(ctx); \
1333  if (ret) \
1334  return ret; \
1335  *val = __get(ctx); \
1336  spu_release(ctx); \
1337  } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
1338  ret = spu_acquire_saved(ctx); \
1339  if (ret) \
1340  return ret; \
1341  *val = __get(ctx); \
1342  spu_release_saved(ctx); \
1343  } else \
1344  *val = __get(ctx); \
1345  \
1346  return 0; \
1347 } \
1348 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
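/*
 * Explanatory example, grounded in this file: the definition
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE)
 *
 * found below expands to a __spufs_npc_get() wrapper that brackets the
 * raw spufs_npc_get() with spu_acquire()/spu_release(), then hands that
 * wrapper to DEFINE_SPUFS_SIMPLE_ATTRIBUTE to build the file_operations.
 */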
1349 
1350 static int spufs_signal1_type_set(void *data, u64 val)
1351 {
1352  struct spu_context *ctx = data;
1353  int ret;
1354 
1355  ret = spu_acquire(ctx);
1356  if (ret)
1357  return ret;
1358  ctx->ops->signal1_type_set(ctx, val);
1359  spu_release(ctx);
1360 
1361  return 0;
1362 }
1363 
1364 static u64 spufs_signal1_type_get(struct spu_context *ctx)
1365 {
1366  return ctx->ops->signal1_type_get(ctx);
1367 }
1368 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1369  spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1370 
1371 
1372 static int spufs_signal2_type_set(void *data, u64 val)
1373 {
1374  struct spu_context *ctx = data;
1375  int ret;
1376 
1377  ret = spu_acquire(ctx);
1378  if (ret)
1379  return ret;
1380  ctx->ops->signal2_type_set(ctx, val);
1381  spu_release(ctx);
1382 
1383  return 0;
1384 }
1385 
1386 static u64 spufs_signal2_type_get(struct spu_context *ctx)
1387 {
1388  return ctx->ops->signal2_type_get(ctx);
1389 }
1390 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1391  spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1392 
1393 #if SPUFS_MMAP_4K
1394 static int
1395 spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1396 {
1397  return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
1398 }
1399 
1400 static const struct vm_operations_struct spufs_mss_mmap_vmops = {
1401  .fault = spufs_mss_mmap_fault,
1402 };
1403 
1404 /*
1405  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1406  */
1407 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1408 {
1409  if (!(vma->vm_flags & VM_SHARED))
1410  return -EINVAL;
1411 
1412  vma->vm_flags |= VM_IO | VM_PFNMAP;
1413  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
1414 
1415  vma->vm_ops = &spufs_mss_mmap_vmops;
1416  return 0;
1417 }
1418 #else /* SPUFS_MMAP_4K */
1419 #define spufs_mss_mmap NULL
1420 #endif /* !SPUFS_MMAP_4K */
1421 
1422 static int spufs_mss_open(struct inode *inode, struct file *file)
1423 {
1424  struct spufs_inode_info *i = SPUFS_I(inode);
1425  struct spu_context *ctx = i->i_ctx;
1426 
1427  file->private_data = i->i_ctx;
1428 
1429  mutex_lock(&ctx->mapping_lock);
1430  if (!i->i_openers++)
1431  ctx->mss = inode->i_mapping;
1432  mutex_unlock(&ctx->mapping_lock);
1433  return nonseekable_open(inode, file);
1434 }
1435 
1436 static int
1437 spufs_mss_release(struct inode *inode, struct file *file)
1438 {
1439  struct spufs_inode_info *i = SPUFS_I(inode);
1440  struct spu_context *ctx = i->i_ctx;
1441 
1442  mutex_lock(&ctx->mapping_lock);
1443  if (!--i->i_openers)
1444  ctx->mss = NULL;
1445  mutex_unlock(&ctx->mapping_lock);
1446  return 0;
1447 }
1448 
1449 static const struct file_operations spufs_mss_fops = {
1450  .open = spufs_mss_open,
1451  .release = spufs_mss_release,
1452  .mmap = spufs_mss_mmap,
1453  .llseek = no_llseek,
1454 };
1455 
1456 static int
1457 spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1458 {
1459  return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
1460 }
1461 
1462 static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
1463  .fault = spufs_psmap_mmap_fault,
1464 };
1465 
1466 /*
1467  * mmap support for full problem state area [0x00000 - 0x1ffff].
1468  */
1469 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1470 {
1471  if (!(vma->vm_flags & VM_SHARED))
1472  return -EINVAL;
1473 
1474  vma->vm_flags |= VM_IO | VM_PFNMAP;
1475  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
1476 
1477  vma->vm_ops = &spufs_psmap_mmap_vmops;
1478  return 0;
1479 }
1480 
1481 static int spufs_psmap_open(struct inode *inode, struct file *file)
1482 {
1483  struct spufs_inode_info *i = SPUFS_I(inode);
1484  struct spu_context *ctx = i->i_ctx;
1485 
1486  mutex_lock(&ctx->mapping_lock);
1487  file->private_data = i->i_ctx;
1488  if (!i->i_openers++)
1489  ctx->psmap = inode->i_mapping;
1490  mutex_unlock(&ctx->mapping_lock);
1491  return nonseekable_open(inode, file);
1492 }
1493 
1494 static int
1495 spufs_psmap_release(struct inode *inode, struct file *file)
1496 {
1497  struct spufs_inode_info *i = SPUFS_I(inode);
1498  struct spu_context *ctx = i->i_ctx;
1499 
1500  mutex_lock(&ctx->mapping_lock);
1501  if (!--i->i_openers)
1502  ctx->psmap = NULL;
1503  mutex_unlock(&ctx->mapping_lock);
1504  return 0;
1505 }
1506 
1507 static const struct file_operations spufs_psmap_fops = {
1508  .open = spufs_psmap_open,
1509  .release = spufs_psmap_release,
1510  .mmap = spufs_psmap_mmap,
1511  .llseek = no_llseek,
1512 };
1513 
1514 
1515 #if SPUFS_MMAP_4K
1516 static int
1517 spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1518 {
1519  return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
1520 }
1521 
1522 static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
1523  .fault = spufs_mfc_mmap_fault,
1524 };
1525 
1526 /*
1527  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1528  */
1529 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1530 {
1531  if (!(vma->vm_flags & VM_SHARED))
1532  return -EINVAL;
1533 
1534  vma->vm_flags |= VM_IO | VM_PFNMAP;
1535  vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
1536 
1537  vma->vm_ops = &spufs_mfc_mmap_vmops;
1538  return 0;
1539 }
1540 #else /* SPUFS_MMAP_4K */
1541 #define spufs_mfc_mmap NULL
1542 #endif /* !SPUFS_MMAP_4K */
1543 
1544 static int spufs_mfc_open(struct inode *inode, struct file *file)
1545 {
1546  struct spufs_inode_info *i = SPUFS_I(inode);
1547  struct spu_context *ctx = i->i_ctx;
1548 
1549  /* we don't want to deal with DMA into other processes */
1550  if (ctx->owner != current->mm)
1551  return -EINVAL;
1552 
1553  if (atomic_read(&inode->i_count) != 1)
1554  return -EBUSY;
1555 
1556  mutex_lock(&ctx->mapping_lock);
1557  file->private_data = ctx;
1558  if (!i->i_openers++)
1559  ctx->mfc = inode->i_mapping;
1560  mutex_unlock(&ctx->mapping_lock);
1561  return nonseekable_open(inode, file);
1562 }
1563 
1564 static int
1565 spufs_mfc_release(struct inode *inode, struct file *file)
1566 {
1567  struct spufs_inode_info *i = SPUFS_I(inode);
1568  struct spu_context *ctx = i->i_ctx;
1569 
1570  mutex_lock(&ctx->mapping_lock);
1571  if (!--i->i_openers)
1572  ctx->mfc = NULL;
1573  mutex_unlock(&ctx->mapping_lock);
1574  return 0;
1575 }
1576 
1577 /* interrupt-level mfc callback function. */
1578 void spufs_mfc_callback(struct spu *spu)
1579 {
1580  struct spu_context *ctx = spu->ctx;
1581 
1582  if (!ctx)
1583  return;
1584 
1585  wake_up_all(&ctx->mfc_wq);
1586 
1587  pr_debug("%s %s\n", __func__, spu->name);
1588  if (ctx->mfc_fasync) {
1589  u32 free_elements, tagstatus;
1590  unsigned int mask;
1591 
1592  /* no need for spu_acquire in interrupt context */
1593  free_elements = ctx->ops->get_mfc_free_elements(ctx);
1594  tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1595 
1596  mask = 0;
1597  if (free_elements & 0xffff)
1598  mask |= POLLOUT;
1599  if (tagstatus & ctx->tagwait)
1600  mask |= POLLIN;
1601 
1602  kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1603  }
1604 }
1605 
1606 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1607 {
1608  /* See if any tag group is complete */
1609  /* FIXME we need locking around tagwait */
1610  *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1611  ctx->tagwait &= ~*status;
1612  if (*status)
1613  return 1;
1614 
1615  /* enable interrupt waiting for any tag group,
1616  may silently fail if interrupts are already enabled */
1617  ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1618  return 0;
1619 }
1620 
1621 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1622  size_t size, loff_t *pos)
1623 {
1624  struct spu_context *ctx = file->private_data;
1625  int ret = -EINVAL;
1626  u32 status;
1627 
1628  if (size != 4)
1629  goto out;
1630 
1631  ret = spu_acquire(ctx);
1632  if (ret)
1633  return ret;
1634 
1635  ret = -EINVAL;
1636  if (file->f_flags & O_NONBLOCK) {
1637  status = ctx->ops->read_mfc_tagstatus(ctx);
1638  if (!(status & ctx->tagwait))
1639  ret = -EAGAIN;
1640  else
1641  /* XXX(hch): shouldn't we clear ret here? */
1642  ctx->tagwait &= ~status;
1643  } else {
1644  ret = spufs_wait(ctx->mfc_wq,
1645  spufs_read_mfc_tagstatus(ctx, &status));
1646  if (ret)
1647  goto out;
1648  }
1649  spu_release(ctx);
1650 
1651  ret = 4;
1652  if (copy_to_user(buffer, &status, 4))
1653  ret = -EFAULT;
1654 
1655 out:
1656  return ret;
1657 }
1658 
1659 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1660 {
1661  pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
1662  cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1663 
1664  switch (cmd->cmd) {
1665  case MFC_PUT_CMD:
1666  case MFC_PUTF_CMD:
1667  case MFC_PUTB_CMD:
1668  case MFC_GET_CMD:
1669  case MFC_GETF_CMD:
1670  case MFC_GETB_CMD:
1671  break;
1672  default:
1673  pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1674  return -EIO;
1675  }
1676 
1677  if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1678  pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
1679  cmd->ea, cmd->lsa);
1680  return -EIO;
1681  }
1682 
1683  switch (cmd->size & 0xf) {
1684  case 1:
1685  break;
1686  case 2:
1687  if (cmd->lsa & 1)
1688  goto error;
1689  break;
1690  case 4:
1691  if (cmd->lsa & 3)
1692  goto error;
1693  break;
1694  case 8:
1695  if (cmd->lsa & 7)
1696  goto error;
1697  break;
1698  case 0:
1699  if (cmd->lsa & 15)
1700  goto error;
1701  break;
1702  error:
1703  default:
1704  pr_debug("invalid DMA alignment %x for size %x\n",
1705  cmd->lsa & 0xf, cmd->size);
1706  return -EIO;
1707  }
1708 
1709  if (cmd->size > 16 * 1024) {
1710  pr_debug("invalid DMA size %x\n", cmd->size);
1711  return -EIO;
1712  }
1713 
1714  if (cmd->tag & 0xfff0) {
1715  /* we reserve the higher tag numbers for kernel use */
1716  pr_debug("invalid DMA tag\n");
1717  return -EIO;
1718  }
1719 
1720  if (cmd->class) {
1721  /* not supported in this version */
1722  pr_debug("invalid DMA class\n");
1723  return -EIO;
1724  }
1725 
1726  return 0;
1727 }
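/*
 * Illustrative values, not part of the original source: a command that
 * passes the checks above could look like
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x100,
 *		.ea   = 0x10000000,
 *		.size = 0x80,
 *		.tag  = 1,
 *		.cmd  = MFC_GET_CMD,
 *	};
 *
 * lsa and ea share 16-byte alignment, size is a multiple of 16 and at
 * most 16kB, tag stays below 16 (higher tags are kernel-reserved), and
 * class is left zero. It is submitted as a single sizeof(cmd) write to
 * the mfc file.
 */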
1728 
1729 static int spu_send_mfc_command(struct spu_context *ctx,
1730  struct mfc_dma_command cmd,
1731  int *error)
1732 {
1733  *error = ctx->ops->send_mfc_command(ctx, &cmd);
1734  if (*error == -EAGAIN) {
1735  /* wait for any tag group to complete
1736  so we have space for the new command */
1737  ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1738  /* try again, because the queue might be
1739  empty again */
1740  *error = ctx->ops->send_mfc_command(ctx, &cmd);
1741  if (*error == -EAGAIN)
1742  return 0;
1743  }
1744  return 1;
1745 }
1746 
1747 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1748  size_t size, loff_t *pos)
1749 {
1750  struct spu_context *ctx = file->private_data;
1751  struct mfc_dma_command cmd;
1752  int ret = -EINVAL;
1753 
1754  if (size != sizeof cmd)
1755  goto out;
1756 
1757  ret = -EFAULT;
1758  if (copy_from_user(&cmd, buffer, sizeof cmd))
1759  goto out;
1760 
1761  ret = spufs_check_valid_dma(&cmd);
1762  if (ret)
1763  goto out;
1764 
1765  ret = spu_acquire(ctx);
1766  if (ret)
1767  goto out;
1768 
1769  ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
1770  if (ret)
1771  goto out;
1772 
1773  if (file->f_flags & O_NONBLOCK) {
1774  ret = ctx->ops->send_mfc_command(ctx, &cmd);
1775  } else {
1776  int status;
1777  ret = spufs_wait(ctx->mfc_wq,
1778  spu_send_mfc_command(ctx, cmd, &status));
1779  if (ret)
1780  goto out;
1781  if (status)
1782  ret = status;
1783  }
1784 
1785  if (ret)
1786  goto out_unlock;
1787 
1788  ctx->tagwait |= 1 << cmd.tag;
1789  ret = size;
1790 
1791 out_unlock:
1792  spu_release(ctx);
1793 out:
1794  return ret;
1795 }
1796 
1797 static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1798 {
1799  struct spu_context *ctx = file->private_data;
1800  u32 free_elements, tagstatus;
1801  unsigned int mask;
1802 
1803  poll_wait(file, &ctx->mfc_wq, wait);
1804 
1805  /*
1806  * For now keep this uninterruptible and also ignore the rule
1807  * that poll should not sleep. Will be fixed later.
1808  */
1809  mutex_lock(&ctx->state_mutex);
1810  ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1811  free_elements = ctx->ops->get_mfc_free_elements(ctx);
1812  tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1813  spu_release(ctx);
1814 
1815  mask = 0;
1816  if (free_elements & 0xffff)
1817  mask |= POLLOUT | POLLWRNORM;
1818  if (tagstatus & ctx->tagwait)
1819  mask |= POLLIN | POLLRDNORM;
1820 
1821  pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
1822  free_elements, tagstatus, ctx->tagwait);
1823 
1824  return mask;
1825 }
1826 
1827 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1828 {
1829  struct spu_context *ctx = file->private_data;
1830  int ret;
1831 
1832  ret = spu_acquire(ctx);
1833  if (ret)
1834  goto out;
1835 #if 0
1836 /* this currently hangs */
1837  ret = spufs_wait(ctx->mfc_wq,
1838  ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1839  if (ret)
1840  goto out;
1841  ret = spufs_wait(ctx->mfc_wq,
1842  ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1843  if (ret)
1844  goto out;
1845 #else
1846  ret = 0;
1847 #endif
1848  spu_release(ctx);
1849 out:
1850  return ret;
1851 }
1852 
1853 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1854 {
1855  struct inode *inode = file->f_path.dentry->d_inode;
1856  int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1857  if (!err) {
1858  mutex_lock(&inode->i_mutex);
1859  err = spufs_mfc_flush(file, NULL);
1860  mutex_unlock(&inode->i_mutex);
1861  }
1862  return err;
1863 }
1864 
1865 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1866 {
1867  struct spu_context *ctx = file->private_data;
1868 
1869  return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1870 }
1871 
1872 static const struct file_operations spufs_mfc_fops = {
1873  .open = spufs_mfc_open,
1874  .release = spufs_mfc_release,
1875  .read = spufs_mfc_read,
1876  .write = spufs_mfc_write,
1877  .poll = spufs_mfc_poll,
1878  .flush = spufs_mfc_flush,
1879  .fsync = spufs_mfc_fsync,
1880  .fasync = spufs_mfc_fasync,
1881  .mmap = spufs_mfc_mmap,
1882  .llseek = no_llseek,
1883 };
1884 
1885 static int spufs_npc_set(void *data, u64 val)
1886 {
1887  struct spu_context *ctx = data;
1888  int ret;
1889 
1890  ret = spu_acquire(ctx);
1891  if (ret)
1892  return ret;
1893  ctx->ops->npc_write(ctx, val);
1894  spu_release(ctx);
1895 
1896  return 0;
1897 }
1898 
1899 static u64 spufs_npc_get(struct spu_context *ctx)
1900 {
1901  return ctx->ops->npc_read(ctx);
1902 }
1903 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1904  "0x%llx\n", SPU_ATTR_ACQUIRE);
1905 
1906 static int spufs_decr_set(void *data, u64 val)
1907 {
1908  struct spu_context *ctx = data;
1909  struct spu_lscsa *lscsa = ctx->csa.lscsa;
1910  int ret;
1911 
1912  ret = spu_acquire_saved(ctx);
1913  if (ret)
1914  return ret;
1915  lscsa->decr.slot[0] = (u32) val;
1916  spu_release_saved(ctx);
1917 
1918  return 0;
1919 }
1920 
1921 static u64 spufs_decr_get(struct spu_context *ctx)
1922 {
1923  struct spu_lscsa *lscsa = ctx->csa.lscsa;
1924  return lscsa->decr.slot[0];
1925 }
1926 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1927  "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1928 
1929 static int spufs_decr_status_set(void *data, u64 val)
1930 {
1931  struct spu_context *ctx = data;
1932  int ret;
1933 
1934  ret = spu_acquire_saved(ctx);
1935  if (ret)
1936  return ret;
1937  if (val)
1938  ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1939  else
1940  ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1941  spu_release_saved(ctx);
1942 
1943  return 0;
1944 }
1945 
1946 static u64 spufs_decr_status_get(struct spu_context *ctx)
1947 {
1948  if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1949  return SPU_DECR_STATUS_RUNNING;
1950  else
1951  return 0;
1952 }
1953 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1954  spufs_decr_status_set, "0x%llx\n",
1955  SPU_ATTR_ACQUIRE_SAVED);
1956 
1957 static int spufs_event_mask_set(void *data, u64 val)
1958 {
1959  struct spu_context *ctx = data;
1960  struct spu_lscsa *lscsa = ctx->csa.lscsa;
1961  int ret;
1962 
1963  ret = spu_acquire_saved(ctx);
1964  if (ret)
1965  return ret;
1966  lscsa->event_mask.slot[0] = (u32) val;
1967  spu_release_saved(ctx);
1968 
1969  return 0;
1970 }
1971 
1972 static u64 spufs_event_mask_get(struct spu_context *ctx)
1973 {
1974  struct spu_lscsa *lscsa = ctx->csa.lscsa;
1975  return lscsa->event_mask.slot[0];
1976 }
1977 
1978 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1979  spufs_event_mask_set, "0x%llx\n",
1980  SPU_ATTR_ACQUIRE_SAVED);
1981 
1982 static u64 spufs_event_status_get(struct spu_context *ctx)
1983 {
1984  struct spu_state *state = &ctx->csa;
1985  u64 stat;
1986  stat = state->spu_chnlcnt_RW[0];
1987  if (stat)
1988  return state->spu_chnldata_RW[0];
1989  return 0;
1990 }
1991 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1992  NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1993 
1994 static int spufs_srr0_set(void *data, u64 val)
1995 {
1996  struct spu_context *ctx = data;
1997  struct spu_lscsa *lscsa = ctx->csa.lscsa;
1998  int ret;
1999 
2000  ret = spu_acquire_saved(ctx);
2001  if (ret)
2002  return ret;
2003  lscsa->srr0.slot[0] = (u32) val;
2004  spu_release_saved(ctx);
2005 
2006  return 0;
2007 }
2008 
2009 static u64 spufs_srr0_get(struct spu_context *ctx)
2010 {
2011  struct spu_lscsa *lscsa = ctx->csa.lscsa;
2012  return lscsa->srr0.slot[0];
2013 }
2014 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
2015  "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
2016 
2017 static u64 spufs_id_get(struct spu_context *ctx)
2018 {
2019  u64 num;
2020 
2021  if (ctx->state == SPU_STATE_RUNNABLE)
2022  num = ctx->spu->number;
2023  else
2024  num = (unsigned int)-1;
2025 
2026  return num;
2027 }
2028 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
2029  SPU_ATTR_ACQUIRE)
2030 
2031 static u64 spufs_object_id_get(struct spu_context *ctx)
2032 {
2033  /* FIXME: Should there really be no locking here? */
2034  return ctx->object_id;
2035 }
2036 
2037 static int spufs_object_id_set(void *data, u64 id)
2038 {
2039  struct spu_context *ctx = data;
2040  ctx->object_id = id;
2041 
2042  return 0;
2043 }
2044 
2045 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
2046  spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
2047 
2048 static u64 spufs_lslr_get(struct spu_context *ctx)
2049 {
2050  return ctx->csa.priv2.spu_lslr_RW;
2051 }
2052 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
2053  SPU_ATTR_ACQUIRE_SAVED)
2054 
2055 static int spufs_info_open(struct inode *inode, struct file *file)
2056 {
2057  struct spufs_inode_info *i = SPUFS_I(inode);
2058  struct spu_context *ctx = i->i_ctx;
2059  file->private_data = ctx;
2060  return 0;
2061 }
2062 
2063 static int spufs_caps_show(struct seq_file *s, void *private)
2064 {
2065  struct spu_context *ctx = s->private;
2066 
2067  if (!(ctx->flags & SPU_CREATE_NOSCHED))
2068  seq_puts(s, "sched\n");
2069  if (!(ctx->flags & SPU_CREATE_ISOLATE))
2070  seq_puts(s, "step\n");
2071  return 0;
2072 }
2073 
2074 static int spufs_caps_open(struct inode *inode, struct file *file)
2075 {
2076  return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2077 }
2078 
2079 static const struct file_operations spufs_caps_fops = {
2080  .open = spufs_caps_open,
2081  .read = seq_read,
2082  .llseek = seq_lseek,
2083  .release = single_release,
2084 };
2085 
2086 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2087  char __user *buf, size_t len, loff_t *pos)
2088 {
2089  u32 data;
2090 
2091  /* EOF if there's no entry in the mbox */
2092  if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2093  return 0;
2094 
2095  data = ctx->csa.prob.pu_mb_R;
2096 
2097  return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2098 }
2099 
2100 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2101  size_t len, loff_t *pos)
2102 {
2103  int ret;
2104  struct spu_context *ctx = file->private_data;
2105 
2106  if (!access_ok(VERIFY_WRITE, buf, len))
2107  return -EFAULT;
2108 
2109  ret = spu_acquire_saved(ctx);
2110  if (ret)
2111  return ret;
2112  spin_lock(&ctx->csa.register_lock);
2113  ret = __spufs_mbox_info_read(ctx, buf, len, pos);
2114  spin_unlock(&ctx->csa.register_lock);
2115  spu_release_saved(ctx);
2116 
2117  return ret;
2118 }
2119 
2120 static const struct file_operations spufs_mbox_info_fops = {
2121  .open = spufs_info_open,
2122  .read = spufs_mbox_info_read,
2123  .llseek = generic_file_llseek,
2124 };
2125 
2126 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2127  char __user *buf, size_t len, loff_t *pos)
2128 {
2129  u32 data;
2130 
2131  /* EOF if there's no entry in the ibox */
2132  if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2133  return 0;
2134 
2135  data = ctx->csa.priv2.puint_mb_R;
2136 
2137  return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2138 }
2139 
2140 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2141  size_t len, loff_t *pos)
2142 {
2143  struct spu_context *ctx = file->private_data;
2144  int ret;
2145 
2146  if (!access_ok(VERIFY_WRITE, buf, len))
2147  return -EFAULT;
2148 
2149  ret = spu_acquire_saved(ctx);
2150  if (ret)
2151  return ret;
2152  spin_lock(&ctx->csa.register_lock);
2153  ret = __spufs_ibox_info_read(ctx, buf, len, pos);
2154  spin_unlock(&ctx->csa.register_lock);
2155  spu_release_saved(ctx);
2156 
2157  return ret;
2158 }
2159 
2160 static const struct file_operations spufs_ibox_info_fops = {
2161  .open = spufs_info_open,
2162  .read = spufs_ibox_info_read,
2163  .llseek = generic_file_llseek,
2164 };
2165 
2166 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2167  char __user *buf, size_t len, loff_t *pos)
2168 {
2169  int i, cnt;
2170  u32 data[4];
2171  u32 wbox_stat;
2172 
2173  wbox_stat = ctx->csa.prob.mb_stat_R;
2174  cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2175  for (i = 0; i < cnt; i++) {
2176  data[i] = ctx->csa.spu_mailbox_data[i];
2177  }
2178 
2179  return simple_read_from_buffer(buf, len, pos, &data,
2180  cnt * sizeof(u32));
2181 }
2182 
2183 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2184  size_t len, loff_t *pos)
2185 {
2186  struct spu_context *ctx = file->private_data;
2187  int ret;
2188 
2189  if (!access_ok(VERIFY_WRITE, buf, len))
2190  return -EFAULT;
2191 
2192  ret = spu_acquire_saved(ctx);
2193  if (ret)
2194  return ret;
2195  spin_lock(&ctx->csa.register_lock);
2196  ret = __spufs_wbox_info_read(ctx, buf, len, pos);
2197  spin_unlock(&ctx->csa.register_lock);
2198  spu_release_saved(ctx);
2199 
2200  return ret;
2201 }
2202 
2203 static const struct file_operations spufs_wbox_info_fops = {
2204  .open = spufs_info_open,
2205  .read = spufs_wbox_info_read,
2206  .llseek = generic_file_llseek,
2207 };
2208 
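The mbox_info, ibox_info and wbox_info readers above share one pattern: spu_acquire_saved() forces the context into the saved state, register_lock guards the CSA snapshot, and simple_read_from_buffer() copies it out. mbox_info and ibox_info return a single u32 (EOF when the mailbox is empty); wbox_info returns up to four entries. A hedged userspace sketch draining wbox_info (path illustrative):

/* Hedged sketch: dump the saved write-mailbox entries. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t data[4];
	ssize_t n;
	int fd, i;

	fd = open("/spu/ctx/wbox_info", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, data, sizeof(data)); /* 4 bytes per queued entry */
	close(fd);
	for (i = 0; i < n / 4; i++)
		printf("wbox[%d] = 0x%08x\n", i, (unsigned int)data[i]);
	return 0;
}
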
2209 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
2210  char __user *buf, size_t len, loff_t *pos)
2211 {
2212  struct spu_dma_info info;
2213  struct mfc_cq_sr *qp, *spuqp;
2214  int i;
2215 
2216  info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2217  info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2218  info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
2219  info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2220  info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2221  for (i = 0; i < 16; i++) {
2222  qp = &info.dma_info_command_data[i];
2223  spuqp = &ctx->csa.priv2.spuq[i];
2224 
2225  qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2226  qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2227  qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2228  qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2229  }
2230 
2231  return simple_read_from_buffer(buf, len, pos, &info,
2232  sizeof info);
2233 }
2234 
2235 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2236  size_t len, loff_t *pos)
2237 {
2238  struct spu_context *ctx = file->private_data;
2239  int ret;
2240 
2241  if (!access_ok(VERIFY_WRITE, buf, len))
2242  return -EFAULT;
2243 
2244  ret = spu_acquire_saved(ctx);
2245  if (ret)
2246  return ret;
2247  spin_lock(&ctx->csa.register_lock);
2248  ret = __spufs_dma_info_read(ctx, buf, len, pos);
2249  spin_unlock(&ctx->csa.register_lock);
2250  spu_release_saved(ctx);
2251 
2252  return ret;
2253 }
2254 
2255 static const struct file_operations spufs_dma_info_fops = {
2256  .open = spufs_info_open,
2257  .read = spufs_dma_info_read,
2258  .llseek = no_llseek,
2259 };
2260 
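dma_info is a binary file: each read yields one struct spu_dma_info assembled from the saved tag status, channel data and the 16 MFC command-queue entries. A hedged sketch, assuming the powerpc <asm/spu_info.h> uapi header is available to userspace:

/* Hedged sketch: snapshot the saved MFC tag and queue state. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <asm/spu_info.h> /* powerpc-only; defines struct spu_dma_info */

int main(void)
{
	struct spu_dma_info info;
	int fd = open("/spu/ctx/dma_info", O_RDONLY); /* path illustrative */

	if (fd < 0)
		return 1;
	if (read(fd, &info, sizeof(info)) == sizeof(info))
		printf("tag mask 0x%llx, tag status 0x%llx\n",
		       (unsigned long long)info.dma_info_mask,
		       (unsigned long long)info.dma_info_status);
	close(fd);
	return 0;
}
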
2261 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2262  char __user *buf, size_t len, loff_t *pos)
2263 {
2264  struct spu_proxydma_info info;
2265  struct mfc_cq_sr *qp, *puqp;
2266  int ret = sizeof info;
2267  int i;
2268 
2269  if (len < ret)
2270  return -EINVAL;
2271 
2272  if (!access_ok(VERIFY_WRITE, buf, len))
2273  return -EFAULT;
2274 
2275  info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2276  info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2277  info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2278  for (i = 0; i < 8; i++) {
2279  qp = &info.proxydma_info_command_data[i];
2280  puqp = &ctx->csa.priv2.puq[i];
2281 
2282  qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2283  qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2284  qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2285  qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2286  }
2287 
2288  return simple_read_from_buffer(buf, len, pos, &info,
2289  sizeof info);
2290 }
2291 
2292 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2293  size_t len, loff_t *pos)
2294 {
2295  struct spu_context *ctx = file->private_data;
2296  int ret;
2297 
2298  ret = spu_acquire_saved(ctx);
2299  if (ret)
2300  return ret;
2301  spin_lock(&ctx->csa.register_lock);
2302  ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2303  spin_unlock(&ctx->csa.register_lock);
2304  spu_release_saved(ctx);
2305 
2306  return ret;
2307 }
2308 
2309 static const struct file_operations spufs_proxydma_info_fops = {
2310  .open = spufs_info_open,
2311  .read = spufs_proxydma_info_read,
2312  .llseek = no_llseek,
2313 };
2314 
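proxydma_info differs from its siblings in two ways: the length and access_ok checks live in the __ helper because the outer read does not perform them, and a buffer shorter than the whole struct is rejected with -EINVAL rather than returning a partial record. A hedged sketch (path illustrative):

/* Hedged sketch: proxydma_info must be read with a full-sized buffer. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <asm/spu_info.h> /* powerpc-only; defines struct spu_proxydma_info */

int main(void)
{
	struct spu_proxydma_info info;
	int fd = open("/spu/ctx/proxydma_info", O_RDONLY);

	if (fd < 0)
		return 1;
	/* A short buffer would get -EINVAL, so always pass the full struct. */
	if (read(fd, &info, sizeof(info)) == sizeof(info))
		printf("proxy DMA tag status 0x%x\n",
		       (unsigned int)info.proxydma_info_status);
	close(fd);
	return 0;
}
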
2315 static int spufs_show_tid(struct seq_file *s, void *private)
2316 {
2317  struct spu_context *ctx = s->private;
2318 
2319  seq_printf(s, "%d\n", ctx->tid);
2320  return 0;
2321 }
2322 
2323 static int spufs_tid_open(struct inode *inode, struct file *file)
2324 {
2325  return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2326 }
2327 
2328 static const struct file_operations spufs_tid_fops = {
2329  .open = spufs_tid_open,
2330  .read = seq_read,
2331  .llseek = seq_lseek,
2332  .release = single_release,
2333 };
2334 
2335 static const char *ctx_state_names[] = {
2336  "user", "system", "iowait", "loaded"
2337 };
2338 
2339 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2340  enum spu_utilization_state state)
2341 {
2342  struct timespec ts;
2343  unsigned long long time = ctx->stats.times[state];
2344 
2345  /*
2346  * In general, utilization statistics are updated by the controlling
2347  * thread as the spu context moves through various well defined
2348  * state transitions, but if the context is lazily loaded its
2349  * utilization statistics are not updated as the controlling thread
2350  * is not tightly coupled with the execution of the spu context. We
2351  * calculate and apply the time delta from the last recorded state
2352  * of the spu context.
2353  */
2354  if (ctx->spu && ctx->stats.util_state == state) {
2355  ktime_get_ts(&ts);
2356  time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2357  }
2358 
2359  return time / NSEC_PER_MSEC;
2360 }
2361 
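In other words, the stored counter only covers completed state intervals; for the state the context currently occupies, the time elapsed since stats.tstamp must be added on the fly before reporting. A hedged standalone restatement of that rule:

/* Hedged restatement of the accounting rule above: add the live delta
 * only when the context is loaded and still in the queried state,
 * then report in milliseconds as spufs_acct_time() does. */
unsigned long long acct_ms(unsigned long long recorded_ns,
			   unsigned long long tstamp_ns,
			   unsigned long long now_ns,
			   int loaded_and_in_state)
{
	if (loaded_and_in_state)
		recorded_ns += now_ns - tstamp_ns;
	return recorded_ns / 1000000ULL; /* NSEC_PER_MSEC */
}
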
2362 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2363 {
2364  unsigned long long slb_flts = ctx->stats.slb_flt;
2365 
2366  if (ctx->state == SPU_STATE_RUNNABLE) {
2367  slb_flts += (ctx->spu->stats.slb_flt -
2368  ctx->stats.slb_flt_base);
2369  }
2370 
2371  return slb_flts;
2372 }
2373 
2374 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2375 {
2376  unsigned long long class2_intrs = ctx->stats.class2_intr;
2377 
2378  if (ctx->state == SPU_STATE_RUNNABLE) {
2379  class2_intrs += (ctx->spu->stats.class2_intr -
2380  ctx->stats.class2_intr_base);
2381  }
2382 
2383  return class2_intrs;
2384 }
2385 
2386 
2387 static int spufs_show_stat(struct seq_file *s, void *private)
2388 {
2389  struct spu_context *ctx = s->private;
2390  int ret;
2391 
2392  ret = spu_acquire(ctx);
2393  if (ret)
2394  return ret;
2395 
2396  seq_printf(s, "%s %llu %llu %llu %llu "
2397  "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2398  ctx_state_names[ctx->stats.util_state],
2399  spufs_acct_time(ctx, SPU_UTIL_USER),
2400  spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2401  spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2402  spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2403  ctx->stats.vol_ctx_switch,
2404  ctx->stats.invol_ctx_switch,
2405  spufs_slb_flts(ctx),
2406  ctx->stats.hash_flt,
2407  ctx->stats.min_flt,
2408  ctx->stats.maj_flt,
2409  spufs_class2_intrs(ctx),
2410  ctx->stats.libassist);
2411  spu_release(ctx);
2412  return 0;
2413 }
2414 
2415 static int spufs_stat_open(struct inode *inode, struct file *file)
2416 {
2417  return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2418 }
2419 
2420 static const struct file_operations spufs_stat_fops = {
2421  .open = spufs_stat_open,
2422  .read = seq_read,
2423  .llseek = seq_lseek,
2424  .release = single_release,
2425 };
2426 
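The stat file is a single line: the utilization state name followed by twelve counters in the order printed by spufs_show_stat() (four times in milliseconds, the context-switch, fault and interrupt counts, then libassist). A hedged parsing sketch (path illustrative):

/* Hedged sketch: parse the one-line stat file. */
#include <stdio.h>

int main(void)
{
	char state[16];
	unsigned long long t[4], vol_cs, invol_cs, slb, hash, minf, majf,
		class2, libassist;
	FILE *f = fopen("/spu/ctx/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%15s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   state, &t[0], &t[1], &t[2], &t[3], &vol_cs, &invol_cs,
		   &slb, &hash, &minf, &majf, &class2, &libassist) == 13)
		printf("%s: %llu ms user time, %llu voluntary switches\n",
		       state, t[0], vol_cs);
	fclose(f);
	return 0;
}
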
2427 static inline int spufs_switch_log_used(struct spu_context *ctx)
2428 {
2429  return (ctx->switch_log->head - ctx->switch_log->tail) %
2430  SWITCH_LOG_BUFSIZE;
2431 }
2432 
2433 static inline int spufs_switch_log_avail(struct spu_context *ctx)
2434 {
2435  return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2436 }
2437 
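head and tail are kept reduced modulo SWITCH_LOG_BUFSIZE and the subtraction is done on unsigned values, so the modulo still yields the correct occupancy after head wraps below tail, provided the buffer size divides the unsigned range (true for any power of two). A hedged numeric illustration with a stand-in size:

/* Hedged illustration of the ring arithmetic above. */
#include <assert.h>

#define BUFSIZE 8 /* illustrative stand-in for SWITCH_LOG_BUFSIZE */

static unsigned long used(unsigned long head, unsigned long tail)
{
	return (head - tail) % BUFSIZE;
}

int main(void)
{
	assert(used(5, 2) == 3);           /* no wrap: three entries queued */
	assert(used(2, 6) == 4);           /* head wrapped past the end */
	assert(BUFSIZE - used(2, 6) == 4); /* matches spufs_switch_log_avail() */
	return 0;
}
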
2438 static int spufs_switch_log_open(struct inode *inode, struct file *file)
2439 {
2440  struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2441  int rc;
2442 
2443  rc = spu_acquire(ctx);
2444  if (rc)
2445  return rc;
2446 
2447  if (ctx->switch_log) {
2448  rc = -EBUSY;
2449  goto out;
2450  }
2451 
2452  ctx->switch_log = kmalloc(sizeof(struct switch_log) +
2453  SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2454  GFP_KERNEL);
2455 
2456  if (!ctx->switch_log) {
2457  rc = -ENOMEM;
2458  goto out;
2459  }
2460 
2461  ctx->switch_log->head = ctx->switch_log->tail = 0;
2462  init_waitqueue_head(&ctx->switch_log->wait);
2463  rc = 0;
2464 
2465 out:
2466  spu_release(ctx);
2467  return rc;
2468 }
2469 
2470 static int spufs_switch_log_release(struct inode *inode, struct file *file)
2471 {
2472  struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2473  int rc;
2474 
2475  rc = spu_acquire(ctx);
2476  if (rc)
2477  return rc;
2478 
2479  kfree(ctx->switch_log);
2480  ctx->switch_log = NULL;
2481  spu_release(ctx);
2482 
2483  return 0;
2484 }
2485 
2486 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2487 {
2488  struct switch_log_entry *p;
2489 
2490  p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2491 
2492  return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2493  (unsigned int) p->tstamp.tv_sec,
2494  (unsigned int) p->tstamp.tv_nsec,
2495  p->spu_id,
2496  (unsigned int) p->type,
2497  (unsigned int) p->val,
2498  (unsigned long long) p->timebase);
2499 }
2500 
2501 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2502  size_t len, loff_t *ppos)
2503 {
2504  struct inode *inode = file->f_path.dentry->d_inode;
2505  struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2506  int error = 0, cnt = 0;
2507 
2508  if (!buf)
2509  return -EINVAL;
2510 
2511  error = spu_acquire(ctx);
2512  if (error)
2513  return error;
2514 
2515  while (cnt < len) {
2516  char tbuf[128];
2517  int width;
2518 
2519  if (spufs_switch_log_used(ctx) == 0) {
2520  if (cnt > 0) {
2521  /* If there's data ready to go, we can
2522  * just return straight away */
2523  break;
2524 
2525  } else if (file->f_flags & O_NONBLOCK) {
2526  error = -EAGAIN;
2527  break;
2528 
2529  } else {
2530  /* spufs_wait will drop the mutex and
2531  * re-acquire, but since we're in read(), the
2532  * file cannot be _released (and so
2533  * ctx->switch_log is stable).
2534  */
2535  error = spufs_wait(ctx->switch_log->wait,
2536  spufs_switch_log_used(ctx) > 0);
2537 
2538  /* On error, spufs_wait returns without the
2539  * state mutex held */
2540  if (error)
2541  return error;
2542 
2543  /* We may have had entries read from underneath
2544  * us while we dropped the mutex in spufs_wait,
2545  * so re-check */
2546  if (spufs_switch_log_used(ctx) == 0)
2547  continue;
2548  }
2549  }
2550 
2551  width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2552  if (width < len)
2553  ctx->switch_log->tail =
2554  (ctx->switch_log->tail + 1) %
2555  SWITCH_LOG_BUFSIZE;
2556  else
2557  /* If the record is greater than space available return
2558  * partial buffer (so far) */
2559  break;
2560 
2561  error = copy_to_user(buf + cnt, tbuf, width);
2562  if (error)
2563  break;
2564  cnt += width;
2565  }
2566 
2567  spu_release(ctx);
2568 
2569  return cnt == 0 ? error : cnt;
2570 }
2571 
2572 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2573 {
2574  struct inode *inode = file->f_path.dentry->d_inode;
2575  struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2576  unsigned int mask = 0;
2577  int rc;
2578 
2579  poll_wait(file, &ctx->switch_log->wait, wait);
2580 
2581  rc = spu_acquire(ctx);
2582  if (rc)
2583  return rc;
2584 
2585  if (spufs_switch_log_used(ctx) > 0)
2586  mask |= POLLIN;
2587 
2588  spu_release(ctx);
2589 
2590  return mask;
2591 }
2592 
2593 static const struct file_operations spufs_switch_log_fops = {
2594  .owner = THIS_MODULE,
2595  .open = spufs_switch_log_open,
2596  .read = spufs_switch_log_read,
2597  .poll = spufs_switch_log_poll,
2598  .release = spufs_switch_log_release,
2599  .llseek = no_llseek,
2600 };
2601 
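Putting the switch_log interface together from userspace: open() fails with EBUSY if another reader holds the log, poll() reports POLLIN once entries are queued, and each read() returns whole formatted records. A hedged sketch (path illustrative):

/* Hedged sketch: tail a context's switch log with poll(2). */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[128];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/spu/ctx/switch_log", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, line, sizeof(line) - 1);
		if (n <= 0)
			break;
		line[n] = '\0';
		fputs(line, stdout); /* "sec.nsec spu_id type val timebase" */
	}
	close(pfd.fd);
	return 0;
}
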
2602 /**
2603  * Log a context switch event to a switch log reader.
2604  *
2605  * Must be called with ctx->state_mutex held.
2606  */
2607 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2608  u32 type, u32 val)
2609 {
2610  if (!ctx->switch_log)
2611  return;
2612 
2613  if (spufs_switch_log_avail(ctx) > 1) {
2614  struct switch_log_entry *p;
2615 
2616  p = ctx->switch_log->log + ctx->switch_log->head;
2617  ktime_get_ts(&p->tstamp);
2618  p->timebase = get_tb();
2619  p->spu_id = spu ? spu->number : -1;
2620  p->type = type;
2621  p->val = val;
2622 
2623  ctx->switch_log->head =
2624  (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2625  }
2626 
2627  wake_up(&ctx->switch_log->wait);
2628 }
2629 
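Note that an entry is logged only while more than one slot remains free: reserving that slot keeps head from ever advancing onto tail, so head == tail unambiguously means an empty log, and when the reader falls behind, new events are dropped rather than overwriting unread entries.
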
2630 static int spufs_show_ctx(struct seq_file *s, void *private)
2631 {
2632  struct spu_context *ctx = s->private;
2633  u64 mfc_control_RW;
2634 
2635  mutex_lock(&ctx->state_mutex);
2636  if (ctx->spu) {
2637  struct spu *spu = ctx->spu;
2638  struct spu_priv2 __iomem *priv2 = spu->priv2;
2639 
2640  spin_lock_irq(&spu->register_lock);
2641  mfc_control_RW = in_be64(&priv2->mfc_control_RW);
2642  spin_unlock_irq(&spu->register_lock);
2643  } else {
2644  struct spu_state *csa = &ctx->csa;
2645 
2646  mfc_control_RW = csa->priv2.mfc_control_RW;
2647  }
2648 
2649  seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2650  " %c %llx %llx %llx %llx %x %x\n",
2651  ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
2652  ctx->flags,
2653  ctx->sched_flags,
2654  ctx->prio,
2655  ctx->time_slice,
2656  ctx->spu ? ctx->spu->number : -1,
2657  !list_empty(&ctx->rq) ? 'q' : ' ',
2658  ctx->csa.class_0_pending,
2659  ctx->csa.class_0_dar,
2660  ctx->csa.class_1_dsisr,
2661  mfc_control_RW,
2662  ctx->ops->runcntl_read(ctx),
2663  ctx->ops->status_read(ctx));
2664 
2665  mutex_unlock(&ctx->state_mutex);
2666 
2667  return 0;
2668 }
2669 
2670 static int spufs_ctx_open(struct inode *inode, struct file *file)
2671 {
2672  return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2673 }
2674 
2675 static const struct file_operations spufs_ctx_fops = {
2676  .open = spufs_ctx_open,
2677  .read = seq_read,
2678  .llseek = seq_lseek,
2679  .release = single_release,
2680 };
2681 
2682 const struct spufs_tree_descr spufs_dir_contents[] = {
2683  { "capabilities", &spufs_caps_fops, 0444, },
2684  { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2685  { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
2686  { "mbox", &spufs_mbox_fops, 0444, },
2687  { "ibox", &spufs_ibox_fops, 0444, },
2688  { "wbox", &spufs_wbox_fops, 0222, },
2689  { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2690  { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2691  { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2692  { "signal1", &spufs_signal1_fops, 0666, },
2693  { "signal2", &spufs_signal2_fops, 0666, },
2694  { "signal1_type", &spufs_signal1_type, 0666, },
2695  { "signal2_type", &spufs_signal2_type, 0666, },
2696  { "cntl", &spufs_cntl_fops, 0666, },
2697  { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
2698  { "lslr", &spufs_lslr_ops, 0444, },
2699  { "mfc", &spufs_mfc_fops, 0666, },
2700  { "mss", &spufs_mss_fops, 0666, },
2701  { "npc", &spufs_npc_ops, 0666, },
2702  { "srr0", &spufs_srr0_ops, 0666, },
2703  { "decr", &spufs_decr_ops, 0666, },
2704  { "decr_status", &spufs_decr_status_ops, 0666, },
2705  { "event_mask", &spufs_event_mask_ops, 0666, },
2706  { "event_status", &spufs_event_status_ops, 0444, },
2707  { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2708  { "phys-id", &spufs_id_ops, 0666, },
2709  { "object-id", &spufs_object_id_ops, 0666, },
2710  { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
2711  { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
2712  { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
2713  { "dma_info", &spufs_dma_info_fops, 0444,
2714  sizeof(struct spu_dma_info), },
2715  { "proxydma_info", &spufs_proxydma_info_fops, 0444,
2716  sizeof(struct spu_proxydma_info)},
2717  { "tid", &spufs_tid_fops, 0444, },
2718  { "stat", &spufs_stat_fops, 0444, },
2719  { "switch_log", &spufs_switch_log_fops, 0444 },
2720  {},
2721 };
2722 
2723 const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
2724  { "capabilities", &spufs_caps_fops, 0444, },
2725  { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2726  { "mbox", &spufs_mbox_fops, 0444, },
2727  { "ibox", &spufs_ibox_fops, 0444, },
2728  { "wbox", &spufs_wbox_fops, 0222, },
2729  { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2730  { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2731  { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2732  { "signal1", &spufs_signal1_nosched_fops, 0222, },
2733  { "signal2", &spufs_signal2_nosched_fops, 0222, },
2734  { "signal1_type", &spufs_signal1_type, 0666, },
2735  { "signal2_type", &spufs_signal2_type, 0666, },
2736  { "mss", &spufs_mss_fops, 0666, },
2737  { "mfc", &spufs_mfc_fops, 0666, },
2738  { "cntl", &spufs_cntl_fops, 0666, },
2739  { "npc", &spufs_npc_ops, 0666, },
2740  { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2741  { "phys-id", &spufs_id_ops, 0666, },
2742  { "object-id", &spufs_object_id_ops, 0666, },
2743  { "tid", &spufs_tid_fops, 0444, },
2744  { "stat", &spufs_stat_fops, 0444, },
2745  {},
2746 };
2747 
2748 const struct spufs_tree_descr spufs_dir_debug_contents[] = {
2749  { ".ctx", &spufs_ctx_fops, 0444, },
2750  {},
2751 };
2752 
2753 const struct spufs_coredump_reader spufs_coredump_read[] = {
2754  { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
2755  { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
2756  { "lslr", NULL, spufs_lslr_get, 19 },
2757  { "decr", NULL, spufs_decr_get, 19 },
2758  { "decr_status", NULL, spufs_decr_status_get, 19 },
2759  { "mem", __spufs_mem_read, NULL, LS_SIZE, },
2760  { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
2761  { "signal1_type", NULL, spufs_signal1_type_get, 19 },
2762  { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
2763  { "signal2_type", NULL, spufs_signal2_type_get, 19 },
2764  { "event_mask", NULL, spufs_event_mask_get, 19 },
2765  { "event_status", NULL, spufs_event_status_get, 19 },
2766  { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
2767  { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
2768  { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
2769  { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
2770  { "proxydma_info", __spufs_proxydma_info_read,
2771  NULL, sizeof(struct spu_proxydma_info)},
2772  { "object-id", NULL, spufs_object_id_get, 19 },
2773  { "npc", NULL, spufs_npc_get, 19 },
2774  { NULL },
2775 };