#include <linux/module.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
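/*
 * ib_umad gives userspace processes access to InfiniBand management
 * datagrams (MADs) through per-port character devices.  <asm/uaccess.h>
 * supplies the copy_to_user()/copy_from_user() helpers used in the
 * read/write paths below.
 */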
static struct class *umad_class;
static void ib_umad_release_dev(struct kref *ref)
		sizeof (struct ib_user_mad_hdr_old);
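/*
 * struct ib_user_mad_hdr_old is the pre-P_Key-index ABI header; the
 * (elided) helper this continuation belongs to evidently returns the
 * header size matching the ABI the process negotiated.
 */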
	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
	dequeue_send(file, packet);
	if (!queue_packet(file, agent, packet))
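/*
 * queue_packet() walks the file's agent slots to find the one owned by
 * this agent, so mad.hdr.id reports the receiving agent to userspace.
 * In the send-completion fragment above, dequeue_send() drops the
 * packet from the outstanding-send list and queue_packet() appears to
 * requeue locally completed (e.g. timed-out) sends for the reader.
 */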
	packet->mad.hdr.status      = 0;
	packet->mad.hdr.sl          = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		packet->mad.hdr.gid_index     = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit     = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
	if (queue_packet(file, agent, packet))
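/*
 * Receive path: SL, DLID path bits, P_Key index and GRH presence are
 * copied from the work completion into the header userspace will read.
 * The GRH fields come from an ah_attr that the elided lines presumably
 * filled via ib_init_ah_from_wc().
 */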
	int left, seg_payload, offset, max_seg_payload;

	recv_buf = &packet->recv_wc->recv_buf;
	if (seg_payload < packet->length) {
		max_seg_payload = sizeof (struct ib_mad) - offset;
		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			seg_payload = min(left, max_seg_payload);
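/*
 * copy_recv_mad(): when the first segment carries less than
 * packet->length, the rest of an RMPP reassembly is copied out segment
 * by segment, each chunk capped at max_seg_payload (an ib_mad minus
 * the management class's data offset).
 */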
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
		ret = copy_recv_mad(file, buf, packet, count);
		ret = copy_send_mad(file, buf, packet, count);
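/* A queued packet is either a received MAD or a locally completed send
 * (e.g. a timeout); the elided recv_wc test selects the copier. */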
		return (hdr1->lid == hdr2->lid);
		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
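/*
 * same_destination(): when neither MAD carries a GRH, LID equality
 * decides.  is_duplicate() appears to use it while scanning this
 * file's outstanding sends so the same transaction is not posted to
 * one destination twice.
 */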
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
	agent = __get_agent(file, packet->mad.hdr.id);

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index    = packet->mad.hdr.gid_index;
		ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
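/*
 * Send path: the destination address handle is built from the
 * user-supplied header; with grh_present set, the GRH routing fields
 * (DGID, SGID index, hop limit, traffic class) are copied in, and the
 * elided line in between presumably converts the flow label as well.
 */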
	rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		      IB_MGMT_RMPP_FLAG_ACTIVE;
					 packet->mad.hdr.pkey_index, rmpp_active,
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
	packet->msg->ah         = ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries    = packet->mad.hdr.retries;
	packet->msg->context[0] = packet;
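/*
 * The send buffer from ib_create_send_mad() inherits the user's
 * timeout and retry parameters; context[0] points back at the packet
 * so the send completion handler can recover it.
 */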
				   hdr_len + data_len - copy_offset)) {
		ret = copy_rmpp_mad(packet->msg, buf);
		ret = is_duplicate(file, packet);
	dequeue_send(file, packet);
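/*
 * The payload is pulled from userspace (copy_rmpp_mad() for RMPP
 * sends), request MADs pass through the duplicate check, and the final
 * dequeue_send() appears to sit on the error-unwind path after a
 * failed post.
 */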
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
	if (!file->port->ib_dev) {
	if (ureq.qpn != 0 && ureq.qpn != 1) {
		if (!__get_agent(file, agent_id))
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
	if (compat_method_mask) {
		u32 *umm = (u32 *) ureq.method_mask;
				umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		memcpy(req.method_mask, ureq.method_mask,
		       sizeof req.method_mask);
		ret = PTR_ERR(agent);
		       "P_Key index support.\n", current->comm);
		       "has info on the new ABI.\n");
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
static long ib_umad_enable_pkey(struct ib_umad_file *file)
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
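/*
 * The compat path converts the 32-bit user pointer with compat_ptr()
 * and passes compat_method_mask = 1 so ib_umad_reg_agent() re-packs
 * the method mask as shown above.
 */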
static int ib_umad_open(struct inode *inode, struct file *filp)
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
static int ib_umad_close(struct inode *inode, struct file *filp)
	kref_put(&dev->ref, ib_umad_release_dev);
	.read           = ib_umad_read,
	.write          = ib_umad_write,
	.poll           = ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl   = ib_umad_compat_ioctl,
	.open           = ib_umad_open,
	.release        = ib_umad_close,
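/*
 * File operations for the per-port umad device nodes.  Illustrative
 * usage sketch (an assumption, not part of this file) of how a process
 * might drive them, with names taken from include/rdma/ib_user_mad.h:
 *
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *	struct ib_user_mad_reg_req req = { .qpn = 1, .mgmt_class = ... };
 *	ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req);
 *	read(fd, buf, bufsize);    (blocks until a MAD arrives)
 *
 * In the full source the compat_ioctl entry is likely guarded by
 * CONFIG_COMPAT.
 */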
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	.open    = ib_umad_sm_open,
	.release = ib_umad_sm_close,
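/*
 * The issm nodes carry no data: open and close exist to raise and
 * lower the port's IsSM capability bit, and sm_sem (initialized to 1
 * below) admits one opener at a time.
 */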
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
static dev_t overflow_maj;
static int find_overflow_devnum(void)
		printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
	spin_lock(&port_lock);
		spin_unlock(&port_lock);
		devnum = find_overflow_devnum();
		spin_lock(&port_lock);
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
		base = devnum + base_dev;
	spin_unlock(&port_lock);
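/*
 * Minor allocation under port_lock: device numbers come first from the
 * statically registered base range (base_dev) and, once that bitmap is
 * full, from the dynamically allocated overflow major obtained by
 * find_overflow_devnum().
 */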
	sema_init(&port->sm_sem, 1);
				  port->cdev.dev, port,
	if (IS_ERR(port->dev))
	if (IS_ERR(port->sm_dev))
static void ib_umad_kill_port(struct ib_umad_port *port)
			if (file->agent[id])
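/* ib_umad_kill_port() sweeps every open file's agent table and
 * unregisters any live agents before the port disappears. */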
static void ib_umad_add_one(struct ib_device *device)
	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	kref_init(&umad_dev->ref);

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
		ib_umad_kill_port(&umad_dev->port[i - s]);
	kref_put(&umad_dev->ref, ib_umad_release_dev);
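/*
 * One ib_umad_port is initialized per physical port (i = s..e); on
 * failure the loop above unwinds the ports already set up and
 * kref_put() drops the reference taken by kref_init().
 */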
static void ib_umad_remove_one(struct ib_device *device)
		ib_umad_kill_port(&umad_dev->port[i]);
	kref_put(&umad_dev->ref, ib_umad_release_dev);
static char *umad_devnode(struct device *dev, umode_t *mode)
static int __init ib_umad_init(void)
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
	umad_class->devnode = umad_devnode;
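/* umad_devnode() presumably prefixes names with "infiniband/" so the
 * nodes appear under /dev/infiniband/. */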
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
static void __exit ib_umad_cleanup(void)