#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <asm/pgtable.h>
#include <linux/export.h>

#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t,
			 loff_t *);
static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
			     unsigned long, loff_t);
	.aio_write = qib_aio_write,
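/*
 * Convert a kernel virtual address to the physical address handed back to
 * user space as an mmap token; returns 0 if the address does not map to a
 * valid page.
 */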
static u64 cvt_kvaddr(void *p)
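/*
 * Fill a qib_base_info structure describing the context layout (PIO buffer
 * counts, mmap offsets, receive header queue geometry) and copy it to the
 * user buffer at ubase, truncated to ubase_size when the caller's structure
 * is smaller than the driver's.
 */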
static int qib_get_base_info(struct file *fp, void __user *ubase,
			     size_t ubase_size)
	unsigned subctxt_cnt;

	sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {

		(rcd->piocnt % subctxt_cnt);

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
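/*
 * qib_tid_update(): pin the caller's pages, DMA-map them and program the
 * expected-TID receive entries for this (sub)context.  The assigned TID
 * indices and a bitmap of the programmed entries are copied back to user
 * space (tidlist/tidmap below); on failure, pages already mapped are
 * unmapped and released again.
 */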
	unsigned long tidmap[8];
	struct page **pagep = NULL;

	} else if (!subctxt) {

		tidoff = tidcnt * (subctxt - 1);

		"Process tried to allocate %u TIDs, only trying max (%u)\n",

	memset(tidmap, 0, sizeof(tidmap));

			   ctxttid * sizeof(*tidbase));

			"Failed to lock addr %p, %u pages: errno %d\n",
			(void *) vaddr, cnt, -ret);

	for (; ntids--; tid++) {

		tidlist[i] = tid + tidoff;

		for (; tid < limit; tid++) {

			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,

			 tidlist, cnt * sizeof(*tidlist))) {

			 tidmap, sizeof(tidmap))) {
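/*
 * qib_tid_free(): walk the user-supplied tidmap, clear each expected-TID
 * entry it names, tear down the DMA mapping and release the pinned page.
 */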
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,

	unsigned long tidmap[8];

	ctxttid += tidcnt * (subctxt - 1);

			   ctxttid * sizeof(*tidbase));

	for (cnt = 0; tid < limit; tid++) {

			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
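/*
 * qib_set_part_key(): make the requested partition key usable by this
 * context.  The key is recorded in the context's pkeys[] table and, if its
 * low 15 bits are not already present, added to a free slot in the port's
 * hardware pkey table.
 */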
	int i, any = 0, pidx = -1;

		if (!rcd->pkeys[i] && pidx == -1)

		if (rcd->pkeys[i] == key) {

		if (!ppd->pkeys[i]) {

		if (ppd->pkeys[i] == key) {

		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {

		if (!ppd->pkeys[i] &&
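/*
 * qib_manage_rcvq(): start or stop packet receipt for a (sub)context.  When
 * receipt is (re)enabled, the receive header queue tail is cleared first so
 * the user process does not see stale packets.
 */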
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,

	unsigned int rcvctrl_op;

		qib_clear_rcvhdrtail(rcd);
static void qib_clean_part_key(struct qib_ctxtdata *rcd,
			       struct qib_devdata *dd)

	int i, j, pchanged = 0;

		if ((ppd->pkeys[j] & 0x7fff) !=
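/*
 * qib_mmap_mem(): remap a physically contiguous kernel buffer into the
 * caller's address space.  Requests longer than the buffer are rejected,
 * and buffers not flagged write_ok must be mapped read-only.
 */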
			unsigned len, void *kvaddr, u32 write_ok, char *what)

		"FAIL on %s: len %lx > %x\n", what,

		"%s must be mapped readonly\n", what);

		"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
		what, rcd->ctxt, pfn, len, ret);

		"FAIL mmap userreg: reqlen %lx > PAGE\n",

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
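/*
 * mmap_piobufs(): map a context's PIO send buffers into user space.  This
 * is chip MMIO space (uncached/write-through on powerpc, see the ifdef
 * below) and is marked VM_DONTCOPY so it is not inherited across fork().
 */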
			unsigned piobufs, unsigned piocnt)

		"FAIL mmap piobufs: reqlen %lx > PAGE\n",

#if defined(__powerpc__)

	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
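/*
 * mmap_rcvegrbufs(): map the context's receive eager buffers.  They are
 * read-only from user space; a writable request is refused.
 */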
856 "FAIL on egr bufs: reqlen %lx > actual %lx\n",
858 (
unsigned long) total_size);
865 "Can't map eager buffers as writable (flags=%lx)\n",
static int qib_file_vma_fault(struct vm_area_struct *vma,
			      struct vm_fault *vmf)

		return VM_FAULT_SIGBUS;

static struct vm_operations_struct qib_file_vm_ops = {
	.fault = qib_file_vma_fault,
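/*
 * mmap_kvaddr(): map the kernel-allocated (vmalloc) buffers that
 * sub-contexts share -- the user register copy, receive header queues and
 * copied eager buffers -- using qib_file_vm_ops above to fault the pages in
 * on demand.
 */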
	unsigned subctxt_cnt;

	size = PAGE_SIZE * subctxt_cnt;

		       PAGE_SIZE * subctxt)) {

			"Can't map eager buffers as writable (flags=%lx)\n",
			vma->vm_flags);

	vma->vm_ops = &qib_file_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
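/*
 * The driver's mmap handler: the offset passed by user space is a token
 * handed out in qib_get_base_info() and selects which of the helpers above
 * (user registers, PIO buffers, pioavail copy, eager buffers, kernel
 * virtual buffers) builds the mapping.
 */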
	unsigned piobufs, piocnt;

	if (!rcd || !(vma->vm_flags & VM_SHARED)) {

		ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));

		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);

		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,

				   "pioavail registers");

		ret = mmap_rcvegrbufs(vma, rcd);

		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,

			    "mmap Failure %d: off %llx len %lx\n",
			    -ret, (unsigned long long)pgaddr,
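/*
 * Poll support: qib_poll_urgent() reports readability when an urgent packet
 * has arrived, qib_poll_next() when any new packet is available; both
 * register the caller on the context's wait queue first.
 */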
static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
				    struct file *fp,
				    struct poll_table_struct *pt)

	poll_wait(fp, &rcd->wait, pt);

static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
				  struct file *fp,
				  struct poll_table_struct *pt)

	poll_wait(fp, &rcd->wait, pt);

		pollflag = qib_poll_urgent(rcd, fp, pt);

		pollflag = qib_poll_next(rcd, fp, pt);
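/*
 * Context sharing (sub-contexts): qib_compatible_subctxts() decides whether
 * the user library's software version speaks the same sub-context protocol
 * as the driver; init_subctxts() then sizes and allocates the shared
 * buffers when more than one sub-context was requested.
 */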
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)

			return user_swminor == 3;

		return user_swminor >= 4;

	unsigned num_subctxts;

		    "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same "

		     sizeof(u32), PAGE_SIZE) * num_subctxts;
	if (!rcd || !ptmp) {

			"Unable to allocate ctxtdata memory, failing open\n");

	ret = init_subctxts(dd, rcd, uinfo);

		if (!usable(dd->pport + port - 1)) {

		ppd = dd->pport + port - 1;

			if (usable(dd->pport + pidx))
				ppd = dd->pport + pidx;

		for (pidx = 0; pidx < dd->num_pports && !ppd;

			if (usable(dd->pport + pidx))
				ppd = dd->pport + pidx;

	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;

static int find_free_ctxt(int unit, struct file *fp,

		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
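/*
 * get_a_ctxt(): no unit was specified, so scan every device and prefer the
 * one with a usable port and the fewest contexts already in use, falling
 * back to any unit that can satisfy the open at all.
 */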
	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;

	for (ndev = 0; ndev < devmax; ndev++) {

		unsigned cused = 0, cfree = 0, pusable = 0;

			    usable(dd->pport + port - 1))

				if (usable(dd->pport + i))

		if (pusable && cfree && cused < inuse) {

		ret = choose_port_ctxt(fp, udd, port, uinfo);

	for (ndev = 0; ndev < devmax; ndev++) {

			ret = choose_port_ctxt(fp, dd, port, uinfo);
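/*
 * find_shared_ctxt(): look for an already-open context that was set up for
 * sharing and that this process is allowed to join as a sub-context.
 */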
static int find_shared_ctxt(struct file *fp,

	int devmax, ndev, i;

	for (ndev = 0; ndev < devmax; ndev++) {

		if (!rcd || !rcd->cnt)
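/*
 * qib_open() only allocates the per-file private data; the real work of
 * choosing and initializing a context is deferred until the process issues
 * the assign-context command through qib_write().
 */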
static int qib_open(struct inode *in, struct file *fp)

	if (qib_compatible_subctxts(swmajor, swminor) &&

		ret = find_shared_ctxt(fp, uinfo);

		ret = find_free_ctxt(i_minor - 1, fp, uinfo);

		ret = get_a_ctxt(fp, uinfo, alg);

	} else if (weight == 1 &&

		  "%s PID %u affinity set to cpu %d; already allocated\n",
static int qib_do_user_init(struct file *fp,

			"%u:ctxt%u: no 2KB buffers available\n",

		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",

		qib_clear_rcvhdrtail(rcd);

static void unlock_expected_tids(struct qib_ctxtdata *rcd)

	for (i = ctxt_tidbase; i < maxtid; i++) {

		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
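/*
 * qib_close(): tear the context down -- disable receives, drop the
 * partition keys this context added, unlock any expected-TID pages still
 * pinned (unlock_expected_tids() above), and free the context data.
 */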
static int qib_close(struct inode *in, struct file *fp)

	unsigned long flags;

	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	qib_clean_part_key(rcd, dd);

	unlock_expected_tids(rcd);

	info.unit = rcd->dd->unit;

	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;

				 u32 __user *inflightp)

				 u32 __user *completep)
	if (!usable(rcd->ppd)) {

	for (i = 0; !usable(rcd->ppd) && i < 300; i++)

	unsigned long flags;

	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;

		rcd = ppd->dd->rcd[ctxt];

	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,

		ret = disarm_req_delay(rcd);
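/*
 * qib_write() is the command channel from user space: the process writes a
 * struct qib_cmd whose type field selects the operation.  The header is
 * copied in first, then only the payload union member the chosen command
 * uses, and the request is then dispatched to one of the helpers above.
 * An illustrative user-space call (field names as in the user ABI header)
 * looks roughly like:
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_ASSIGN_CTXT };
 *	cmd.cmd.user_info = uinfo;
 *	write(fd, &cmd, sizeof(cmd));
 */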
			 size_t count, loff_t *off)

	size_t consumed, copy = 0;

	if (count < sizeof(cmd.type)) {

	consumed = sizeof(cmd.type);

		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;

		copy = sizeof(cmd.cmd.ctxt_info);
		dest = &cmd.cmd.ctxt_info;

		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;

		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;

		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;

		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;

		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;

		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;

		copy = sizeof(cmd.cmd.event_mask);
		dest = &cmd.cmd.event_mask;
	if ((count - consumed) < copy) {

		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);

		ret = qib_do_user_init(fp, &cmd.cmd.user_info);

		ret = qib_get_base_info(fp, (void __user *) (unsigned long)
					cmd.cmd.user_info.spu_base_info,
					cmd.cmd.user_info.spu_base_info_size);

		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
				    (unsigned long) cmd.cmd.ctxt_info);

		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);

		ret = qib_set_part_key(rcd, cmd.cmd.part_key);

		ret = disarm_req_delay(rcd);

		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);

					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_inflight);

		ret = qib_sdma_get_complete(rcd->ppd,

					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_complete);

		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
					 cmd.cmd.event_mask);
			     unsigned long dim, loff_t off)

static struct class *qib_class;
static dev_t qib_dev;
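/*
 * Character device plumbing: each unit gets its own cdev and device node
 * under the qib class, plus a "wildcard" node that any unit can satisfy
 * (wildcard_cdev/wildcard_device below); the messages here report
 * allocation and registration failures.
 */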
		pr_err("Could not allocate cdev for minor %d, %s\n",

		pr_err("Could not add cdev for minor %d, %s (err %d)\n",

	if (!IS_ERR(device))

	ret = PTR_ERR(device);

	pr_err("Could not create device for minor %d, %s (err %d)\n",
static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

	pr_err("Could not allocate chrdev region (err %d)\n", -ret);

	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
		pr_err("Could not create device class (err %d)\n", -ret);

static void qib_user_remove(struct qib_devdata *dd)

			     &wildcard_cdev, &wildcard_device);

		qib_user_remove(dd);

		r = qib_user_add(dd);

		qib_user_remove(dd);