#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/export.h>

#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
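/*
 * Diag clients are recycled through a simple singly-linked free list
 * (client_pool), so repeated open/close cycles do not have to hit the
 * allocator each time.
 */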
static struct qib_diag_client {
	struct qib_diag_client *next;
static struct qib_diag_client *get_client(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;
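	/* Reuse an entry from the free list when one is available. */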
		client_pool = dc->next;
static void return_client(struct qib_diag_client *dc)
	struct qib_diag_client *tdc, *rdc;
		tdc = dc->dd->diag_client;
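		/* Scan this device's client list; unlink dc when found. */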
			if (dc == tdc->next) {
				tdc->next = dc->next;
		rdc->next = client_pool;
static int qib_diag_release(struct inode *in, struct file *fp);
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off);
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off);
	.write = qib_diag_write,
	.read = qib_diag_read,
	.open = qib_diag_open,
	.release = qib_diag_release,
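/*
 * The diagpkt device is shared by all attached units, so its cdev and
 * device node are module-global rather than per-device.
 */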
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_device;
static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
				 size_t count, loff_t *off);
	.write = qib_diagpkt_write,
			    &diagpkt_file_ops, &diagpkt_cdev,
			    &diagpkt_device);
static void qib_unregister_observers(struct qib_devdata *dd);
	struct qib_diag_client *dc;
	while (client_pool) {
		dc = client_pool;
		client_pool = dc->next;
	qib_unregister_observers(dd);
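/*
 * qib_remap_ioaddr32() translates a "chip address space" offset, as
 * used by the diag interface, into the matching kernel __iomem
 * pointer (kernel regs, user regs, or the 2K/4K PIO send buffers)
 * and reports how many bytes remain valid in that region.
 */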
	u32 snd_bottom, snd_lim = 0;
	if (offset < kreglen) {
		map = krb32 + (offset / sizeof(u32));
	if (offset >= dd->uregbase && offset < ulim) {
		snd_lim = snd_bottom + tot2k;
		if (snd_bottom > offs4k)
				snd_lim = offs4k + tot4k;
	if (offset >= snd_bottom && offset < snd_lim) {
		offset -= snd_bottom;
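	/*
	 * Two further align4k-sized chunks past the 4 KB PIO buffers
	 * are part of the mapping as well.
	 */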
	snd_lim = offs4k + tot4k + 2 * dd->align4k;
	if (offset >= (offs4k + tot4k) && offset < snd_lim) {
			((offset - (offs4k + tot4k)) / sizeof(u32));
	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	reg_end = reg_addr + (count / sizeof(u64));
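	/* Read the chip one u64 at a time, copying each out to user space. */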
	while (reg_addr < reg_end) {
		uaddr += sizeof(u64);
static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	reg_end = reg_addr + (count / sizeof(u64));
	while (reg_addr < reg_end) {
		uaddr += sizeof(u64);
static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	reg_end = reg_addr + (count / sizeof(u32));
	while (reg_addr < reg_end) {
		uaddr += sizeof(u32);
static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	reg_end = reg_addr + (count / sizeof(u32));
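	/* Pull each 32-bit word in from user space and write it to the chip. */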
	while (reg_addr < reg_end) {
		uaddr += sizeof(u32);
	struct qib_diag_client *dc;
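/*
 * qib_diagpkt_write() injects a raw packet, described by a struct
 * qib_diag_xpkt copied in from user space, directly into a PIO send
 * buffer.
 */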
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
	if (count != sizeof(dp)) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
	plen = sizeof(u32) + dp.len;
605 "Unable to allocate tmp buffer, failing\n");
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
static int qib_diag_release(struct inode *in, struct file *fp)
		pr_err("vmalloc for observer failed\n");
static void qib_unregister_observers(struct qib_devdata *dd)
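/* An observer matches when its [bottom, top] range covers the address. */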
		if (addr >= op->bottom && addr <= op->top)
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off)
	else if ((count % 4) || (*off % 4))
	else if (dc->state < READY && (*off || count != 8))
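		/* handshake not complete: only the 8-byte read at offset 0 is legal */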
		use_32 = (count % 8) || (*off % 8);
		op = diag_get_observer(dd, *off);
			ret = op->hook(dd, op, offset, &data64, 0, use_32);
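		/* No observer claimed the range: read the mapped chip space directly. */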
				ret = qib_read_umem32(dd, data, (u32) *off,
						      count);
				ret = qib_read_umem64(dd, data, (u32) *off,
						      count);
		} else if (ret == count) {
					 sizeof(u32) : sizeof(u64));
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off)
	else if ((count % 4) || (*off % 4))
	else if (dc->state < READY &&
		((*off || count != 8) || dc->state != INIT))
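		/* before READY, accept only the 8-byte handshake write at offset 0 from INIT state */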
		int use_32 = (count % 8) || (*off % 8);
		if (count == 4 || count == 8) {
			op = diag_get_observer(dd, *off);
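			/*
			 * If an observer covers this offset its hook handles
			 * the write; the all-ones mask requests a full-width
			 * update.
			 */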
				ret = op->hook(dd, op, offset, &data64, ~0ULL,
					       use_32);
				ret = qib_write_umem32(dd, (u32) *off, data,
						       count);
				ret = qib_write_umem64(dd, (u32) *off, data,
						       count);
	if (dc->state == INIT)
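		dc->state = READY; /* all read/write OK now */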