#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *,
			   size_t, loff_t *);
static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);
	.aio_write = ipath_writev,
	.release = ipath_close,
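
/*
 * cvt_kvaddr(): translate a kernel virtual address into a physical
 * address that can be handed back to user space for a later mmap.
 */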
static u64 cvt_kvaddr(void *p)
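
/*
 * ipath_get_base_info(): copy the port's base info out to the user
 * library.  Older user ABIs pass a smaller structure, so the size is
 * checked (and trimmed) before the copy to user space.
 */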
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)

	sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		   "Base size %zu, need %zu (version mismatch?)\n",

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
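
/*
 * ipath_tid_update(): pin the caller's pages and program them into the
 * port's expected-TID receive entries.  Each entry that gets programmed
 * is recorded in tidmap so a partial failure can be undone, and the
 * resulting TID list and map are copied back to user space.
 */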
	unsigned long tidmap[8];
	struct page **pagep = NULL;

		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);

	} else if (!subport) {

		tidoff = tidcnt * (subport - 1);

			  "TIDs, only trying max (%u)\n", cnt, tidcnt);

	memset(tidmap, 0, sizeof(tidmap));

				   porttid * sizeof(*tidbase));
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",

		ipath_dbg("Failed to lock addr %p, %u pages "
			  "(already locked)\n",
			  (void *) vaddr, cnt);

			 "Failed to lock addr %p, %u pages: "
			 "errno %d\n", (void *) vaddr, cnt, -ret);

	for (; ntids--; tid++) {

			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);

		tidlist[i] = tid + tidoff;

			   "vaddr %lx\n", i, tid + tidoff, vaddr);
					  dd->pcidev, pagep[i], 0, PAGE_SIZE,

			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,

		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",

		for (; tid < limit; tid++) {

			pci_unmap_page(dd->pcidev,

				 tidlist, cnt * sizeof(*tidlist))) {

				 tidmap, sizeof tidmap)) {

		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,

	unsigned long tidmap[8];

		porttid += tidcnt * (subport - 1);

				   porttid * sizeof(*tidbase));

		   limit, tid, porttid);

	for (cnt = 0; tid < limit; tid++) {

			pci_unmap_page(dd->pcidev,

			ipath_dbg("Unused tid %u, ignoring\n", tid);

		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",

		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
	int i, any = 0, pidx = -1;

		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",

			  "(%x) more than once\n",

		ipath_dbg("All pkeys for port %u already in use, "

			   "matches #%d, count now %d\n",

			   "0, after dec, it's %d\n",

		ipath_dbg("port %u, all pkeys already in use, "

			   "portidx %d, new pkey reg %llx\n",
			   (unsigned long long) pkey);

	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,

		ipath_clear_rcvhdrtail(pd);

	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
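
/*
 * ipath_clean_part_key(): drop the references this port holds on the
 * partition keys it set; the updated pkey set is written back to the
 * chip through kr_partitionkey.
 */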
	int i, j, pchanged = 0;

		ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "

			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);

		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
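
/*
 * Eager receive buffers: init_user_egr_sizes() works out how many
 * buffers fit in each page-aligned chunk, and ipath_create_user_egr()
 * allocates the chunks and fills in the eager TID entries.
 */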
	unsigned egrperchunk, egrcnt, size;

	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;

		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

			goto bail_rcvegrbuf_phys;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
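
/*
 * The mmap helpers below remap driver memory and chip resources into
 * the process address space.  Regions that must stay read-only for the
 * user are rejected if the vma is writable, and the mappings are marked
 * VM_DONTCOPY | VM_DONTEXPAND so they are neither inherited nor grown.
 */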
			  void *kvaddr, char *what)

			  "FAIL on %s: len %lx > %x\n", what,

			 "%s must be mapped readonly\n", what);

			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
			unsigned piobufs, unsigned piocnt)

			 "reqlen %lx > PAGE\n",

#if defined(__powerpc__)

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

			 "reqlen %lx > actual %lx\n",
			 (unsigned long) total_size);

			  "writable (flags=%lx)\n", vma->vm_flags);
				  struct vm_fault *vmf)

		return VM_FAULT_SIGBUS;

static const struct vm_operations_struct ipath_file_vm_ops = {
	.fault = ipath_file_vma_fault,

			  PAGE_SIZE * subport)) {

			 "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);

	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
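
/*
 * ipath_mmap(): the offset passed by the user library is a cookie that
 * encodes which region is wanted.  Decode it and hand off to the helper
 * that maps the matching region: user registers, PIO buffers, the
 * pioavail register copy, eager buffers, or other per-port memory.
 */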
	unsigned piobufs, piocnt;

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,

		ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));

		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);

		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     "pioavail registers");

		ret = mmap_rcvegrbufs(vma, pd);

		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,

			  "Failure %d on off %llx len %lx\n",
			  -ret, (unsigned long long)pgaddr,
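
/*
 * poll support: ipath_poll_hdrqfull() reports header-queue overflow,
 * ipath_poll_urgent() and ipath_poll_next() check for urgent packets or
 * newly arrived receive headers, and ipath_poll() dispatches between
 * them based on the port's poll type.
 */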
	unsigned pollflag = 0;

	unsigned pollflag = 0;

	pollflag = ipath_poll_hdrqfull(pd);

	unsigned pollflag = 0;

	pollflag = ipath_poll_hdrqfull(pd);

	tail = ipath_get_rcvhdrtail(pd);

static unsigned int ipath_poll(struct file *fp,

		pollflag = ipath_poll_urgent(pd, fp, pt);

		pollflag = ipath_poll_next(pd, fp, pt);
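
/*
 * User/driver software interface versioning: subports require a user
 * library at interface 1.3 or newer, and port sharing is only allowed
 * between minor versions that ipath_compatible_subports() accepts.
 */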
static int ipath_supports_subports(int user_swmajor, int user_swminor)

	return (user_swmajor > 1) || (user_swminor >= 3);

static int ipath_compatible_subports(int user_swmajor, int user_swminor)

		return user_swminor == 3;

		return user_swminor >= 4;
	unsigned num_subports;

			      "Inconsistent ipath_compatible_subports()\n");

			"Mismatched user version (%d.%d) and driver "
			"version (%d.%d) while port sharing. Ensure "
			"that driver and library are from the same "

		       sizeof(u32), PAGE_SIZE) * num_subports;

			      "memory, failing open\n");

	init_user_egr_sizes(pd);
	if ((ret = init_subports(dd, pd, uinfo)) != 0)
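
/*
 * Port assignment at open time: try_alloc_port() claims one port on a
 * unit; find_free_port() is used when the minor selects a specific
 * unit, find_best_unit() prefers the device closest to the CPU the task
 * is running on, and find_shared_port() joins an already-open port as a
 * subport.
 */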
static int find_free_port(int unit, struct file *fp,

		ret = try_alloc_port(dd, i, fp, uinfo);

static int find_best_unit(struct file *fp,

	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;

			   "cpu %d/%d\n", current->comm,
			   current->pid, i, ncpus);

	if (curcpu != -1 && nset != ncpus) {

		prefunit = curcpu / (ncpus / npresent);

			   "%d cpus/chip, select unit %d\n",
			   npresent, ncpus, ncpus / npresent,
		devmax = prefunit + 1;

	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;

			ret = try_alloc_port(dd, i, fp, uinfo);

		ipath_dbg("No ports available (none initialized "

			   "%s[%u] no ports on prefunit "
			   "%d, clear and re-check\n",

static int find_shared_port(struct file *fp,

	int devmax, ndev, i;

	for (ndev = 0; ndev < devmax; ndev++) {

			   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
static int ipath_open(struct inode *in, struct file *fp)
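
/*
 * ipath_assign_port(): called from the write() command path once the
 * user library has identified itself.  Check the user/driver software
 * interface version, then pick a port: a shared port if one is
 * available and compatible, a specific unit if the minor demands one,
 * or the best free unit otherwise.
 */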
static int ipath_assign_port(struct file *fp,

	unsigned swmajor, swminor;

		ipath_dbg("User major version %d not same as driver "

		ipath_dbg("User minor version %d not same as driver "

	if (ipath_compatible_subports(swmajor, swminor) &&
	    (ret = find_shared_port(fp, uinfo))) {

		   (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);

		ret = find_free_port(i_minor - 1, fp, uinfo);

		ret = find_best_unit(fp, uinfo);
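
/*
 * ipath_do_user_init(): second stage of initialization, run once the
 * port has been assigned: allocate the eager buffers and clear the
 * receive header queue tail for the new port.
 */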
static int ipath_do_user_init(struct file *fp,

	ret = ipath_create_user_egr(pd);

	ipath_clear_rcvhdrtail(pd);

	for (i = port_tidbase; i < maxtid; i++) {

			   (unsigned long long)
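
/*
 * ipath_close(): release the port.  Receives for the port are shut off
 * via kr_rcvctrl, partition key references are dropped, and any
 * expected-TID pages the process still had pinned are unlocked.
 */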
static int ipath_close(struct inode *in, struct file *fp)

	unsigned long flags;

			  "%u rcv %u, pio already\n",

		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,

		ipath_clean_part_key(pd, dd);

		unlock_expected_tids(pd);
	info.num_active = nup;

	if (ipath_supports_subports(pd->userversion >> 16,

		sz = sizeof(info) - 2 * sizeof(u16);

			       void __user *slave_mask_addr)

				    u32 __user *inflightp)

static int ipath_sdma_get_complete(struct ipath_devdata *dd,

				   u32 __user *completep)
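
/*
 * ipath_write(): the command channel.  User space writes a struct
 * ipath_cmd; the switch below sizes the payload for the particular
 * command, copies only that much from the user buffer, and dispatches
 * to the handler for the command type.
 */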
			   size_t count, loff_t *off)

	size_t consumed, copy;

	if (count < sizeof(cmd.type)) {

	consumed = sizeof(cmd.type);
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;

		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;

		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;

		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;

		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;

		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;

		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;

		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;

		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;

		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
	if ((count - consumed) < copy) {

		ret = ipath_assign_port(fp, &cmd.cmd.user_info);

		ret = ipath_assign_port(fp, &cmd.cmd.user_info);

		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);

		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		ret = ipath_manage_rcvq(pd, subport_fp(fp),
					cmd.cmd.recv_ctrl);

			(unsigned long) cmd.cmd.port_info);

		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);

		ret = ipath_tid_free(pd, subport_fp(fp),
				     &cmd.cmd.tid_info);

		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);

		if (cmd.cmd.armlaunch_ctrl)

			(u32 __user *) (unsigned long)
			cmd.cmd.sdma_inflight);

		ret = ipath_sdma_get_complete(pd->port_dd,

			(u32 __user *) (unsigned long)
			cmd.cmd.sdma_complete);

			    unsigned long dim, loff_t off)
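
/*
 * Character device setup: init_cdev() allocates and registers a cdev
 * and creates its device node in the ipath class; user_init() sets up
 * the chrdev region and the class, and a wildcard node plus one node
 * per unit are created, torn down again through cleanup_cdev().
 */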
static struct class *ipath_class;

	       ": Could not allocate cdev for minor %d, %s\n",

	       ": Could not add cdev for minor %d, %s (err %d)\n",

	if (IS_ERR(device)) {
		ret = PTR_ERR(device);

		       "device for minor %d, %s (err %d)\n",

		     struct cdev **cdevp, struct device **devp)

	return init_cdev(minor, name, fops, cdevp, devp);

static void cleanup_cdev(struct cdev **cdevp,
			 struct device **devp)

	struct device *dev = *devp;

			      struct device **devp)

	cleanup_cdev(cdevp, devp);
static struct cdev *wildcard_cdev;
static struct device *wildcard_dev;

static int user_init(void)

		       "chrdev region (err %d)\n", -ret);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);

			  "device class (err %d)\n", -ret);

static void user_cleanup(void)

			      "error %d\n", -ret);

		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,

			      "minor: error %d\n", -ret);

		ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,

		cleanup_cdev(&wildcard_cdev, &wildcard_dev);