Linux kernel 3.7.1 — drivers/infiniband/core/ucm.c (InfiniBand userspace Connection Manager).
NOTE: this listing was extracted from a Doxygen source browser; the per-line numbers
embedded in the text are artifacts of that export, and several original lines were lost.
1 /*
2  * Copyright (c) 2005 Topspin Communications. All rights reserved.
3  * Copyright (c) 2005 Intel Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses. You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  * Redistribution and use in source and binary forms, with or
12  * without modification, are permitted provided that the following
13  * conditions are met:
14  *
15  * - Redistributions of source code must retain the above
16  * copyright notice, this list of conditions and the following
17  * disclaimer.
18  *
19  * - Redistributions in binary form must reproduce the above
20  * copyright notice, this list of conditions and the following
21  * disclaimer in the documentation and/or other materials
22  * provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/completion.h>
35 #include <linux/init.h>
36 #include <linux/fs.h>
37 #include <linux/module.h>
38 #include <linux/device.h>
39 #include <linux/err.h>
40 #include <linux/poll.h>
41 #include <linux/sched.h>
42 #include <linux/file.h>
43 #include <linux/mount.h>
44 #include <linux/cdev.h>
45 #include <linux/idr.h>
46 #include <linux/mutex.h>
47 #include <linux/slab.h>
48 
49 #include <asm/uaccess.h>
50 
51 #include <rdma/ib_cm.h>
52 #include <rdma/ib_user_cm.h>
53 #include <rdma/ib_marshall.h>
54 
55 MODULE_AUTHOR("Libor Michalek");
56 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
57 MODULE_LICENSE("Dual BSD/GPL");
58 
59 struct ib_ucm_device {
60  int devnum;
61  struct cdev cdev;
62  struct device dev;
63  struct ib_device *ib_dev;
64 };
65 
66 struct ib_ucm_file {
67  struct mutex file_mutex;
68  struct file *filp;
70 
71  struct list_head ctxs;
72  struct list_head events;
74 };
75 
77  int id;
78  struct completion comp;
81 
82  struct ib_ucm_file *file;
83  struct ib_cm_id *cm_id;
85 
86  struct list_head events; /* list of pending events. */
87  struct list_head file_list; /* member in file ctx list */
88 };
89 
90 struct ib_ucm_event {
92  struct list_head file_list; /* member in file event list */
93  struct list_head ctx_list; /* member in ctx event list */
94 
95  struct ib_cm_id *cm_id;
97  void *data;
98  void *info;
99  int data_len;
100  int info_len;
101 };
102 
/*
 * Static char-device numbering.
 *
 * NOTE(review): the enumerators were lost in the scrape; the names are
 * required by IB_UCM_BASE_DEV below and by dev_map/find_overflow_devnum.
 * Values restored from upstream ucm.c — confirm against the 3.7 tree.
 */
enum {
	IB_UCM_MAJOR	   = 231,
	IB_UCM_BASE_MINOR  = 224,
	IB_UCM_MAX_DEVICES = 32
};

#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
110 
111 static void ib_ucm_add_one(struct ib_device *device);
112 static void ib_ucm_remove_one(struct ib_device *device);
113 
114 static struct ib_client ucm_client = {
115  .name = "ucm",
116  .add = ib_ucm_add_one,
117  .remove = ib_ucm_remove_one
118 };
119 
120 static DEFINE_MUTEX(ctx_id_mutex);
121 static DEFINE_IDR(ctx_id_table);
122 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
123 
124 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
125 {
126  struct ib_ucm_context *ctx;
127 
128  mutex_lock(&ctx_id_mutex);
129  ctx = idr_find(&ctx_id_table, id);
130  if (!ctx)
131  ctx = ERR_PTR(-ENOENT);
132  else if (ctx->file != file)
133  ctx = ERR_PTR(-EINVAL);
134  else
135  atomic_inc(&ctx->ref);
136  mutex_unlock(&ctx_id_mutex);
137 
138  return ctx;
139 }
140 
141 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
142 {
143  if (atomic_dec_and_test(&ctx->ref))
144  complete(&ctx->comp);
145 }
146 
147 static inline int ib_ucm_new_cm_id(int event)
148 {
149  return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED;
150 }
151 
152 static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
153 {
154  struct ib_ucm_event *uevent;
155 
156  mutex_lock(&ctx->file->file_mutex);
157  list_del(&ctx->file_list);
158  while (!list_empty(&ctx->events)) {
159 
160  uevent = list_entry(ctx->events.next,
161  struct ib_ucm_event, ctx_list);
162  list_del(&uevent->file_list);
163  list_del(&uevent->ctx_list);
164  mutex_unlock(&ctx->file->file_mutex);
165 
166  /* clear incoming connections. */
167  if (ib_ucm_new_cm_id(uevent->resp.event))
168  ib_destroy_cm_id(uevent->cm_id);
169 
170  kfree(uevent);
171  mutex_lock(&ctx->file->file_mutex);
172  }
173  mutex_unlock(&ctx->file->file_mutex);
174 }
175 
176 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
177 {
178  struct ib_ucm_context *ctx;
179  int result;
180 
181  ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
182  if (!ctx)
183  return NULL;
184 
185  atomic_set(&ctx->ref, 1);
186  init_completion(&ctx->comp);
187  ctx->file = file;
188  INIT_LIST_HEAD(&ctx->events);
189 
190  do {
191  result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
192  if (!result)
193  goto error;
194 
195  mutex_lock(&ctx_id_mutex);
196  result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
197  mutex_unlock(&ctx_id_mutex);
198  } while (result == -EAGAIN);
199 
200  if (result)
201  goto error;
202 
203  list_add_tail(&ctx->file_list, &file->ctxs);
204  return ctx;
205 
206 error:
207  kfree(ctx);
208  return NULL;
209 }
210 
211 static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
212  struct ib_cm_req_event_param *kreq)
213 {
214  ureq->remote_ca_guid = kreq->remote_ca_guid;
215  ureq->remote_qkey = kreq->remote_qkey;
216  ureq->remote_qpn = kreq->remote_qpn;
217  ureq->qp_type = kreq->qp_type;
218  ureq->starting_psn = kreq->starting_psn;
220  ureq->initiator_depth = kreq->initiator_depth;
222  ureq->flow_control = kreq->flow_control;
224  ureq->retry_count = kreq->retry_count;
225  ureq->rnr_retry_count = kreq->rnr_retry_count;
226  ureq->srq = kreq->srq;
227  ureq->port = kreq->port;
228 
230  if (kreq->alternate_path)
232  kreq->alternate_path);
233 }
234 
235 static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
236  struct ib_cm_rep_event_param *krep)
237 {
238  urep->remote_ca_guid = krep->remote_ca_guid;
239  urep->remote_qkey = krep->remote_qkey;
240  urep->remote_qpn = krep->remote_qpn;
241  urep->starting_psn = krep->starting_psn;
243  urep->initiator_depth = krep->initiator_depth;
244  urep->target_ack_delay = krep->target_ack_delay;
245  urep->failover_accepted = krep->failover_accepted;
246  urep->flow_control = krep->flow_control;
247  urep->rnr_retry_count = krep->rnr_retry_count;
248  urep->srq = krep->srq;
249 }
250 
251 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
252  struct ib_cm_sidr_rep_event_param *krep)
253 {
254  urep->status = krep->status;
255  urep->qkey = krep->qkey;
256  urep->qpn = krep->qpn;
257 };
258 
259 static int ib_ucm_event_process(struct ib_cm_event *evt,
260  struct ib_ucm_event *uvt)
261 {
262  void *info = NULL;
263 
264  switch (evt->event) {
265  case IB_CM_REQ_RECEIVED:
266  ib_ucm_event_req_get(&uvt->resp.u.req_resp,
267  &evt->param.req_rcvd);
269  uvt->resp.present = IB_UCM_PRES_PRIMARY;
270  uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
272  break;
273  case IB_CM_REP_RECEIVED:
274  ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
275  &evt->param.rep_rcvd);
277  break;
278  case IB_CM_RTU_RECEIVED:
280  uvt->resp.u.send_status = evt->param.send_status;
281  break;
282  case IB_CM_DREQ_RECEIVED:
284  uvt->resp.u.send_status = evt->param.send_status;
285  break;
286  case IB_CM_DREP_RECEIVED:
288  uvt->resp.u.send_status = evt->param.send_status;
289  break;
290  case IB_CM_MRA_RECEIVED:
291  uvt->resp.u.mra_resp.timeout =
292  evt->param.mra_rcvd.service_timeout;
294  break;
295  case IB_CM_REJ_RECEIVED:
296  uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
298  uvt->info_len = evt->param.rej_rcvd.ari_length;
299  info = evt->param.rej_rcvd.ari;
300  break;
301  case IB_CM_LAP_RECEIVED:
302  ib_copy_path_rec_to_user(&uvt->resp.u.lap_resp.path,
303  evt->param.lap_rcvd.alternate_path);
305  uvt->resp.present = IB_UCM_PRES_ALTERNATE;
306  break;
307  case IB_CM_APR_RECEIVED:
308  uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
310  uvt->info_len = evt->param.apr_rcvd.info_len;
311  info = evt->param.apr_rcvd.apr_info;
312  break;
314  uvt->resp.u.sidr_req_resp.pkey =
315  evt->param.sidr_req_rcvd.pkey;
316  uvt->resp.u.sidr_req_resp.port =
317  evt->param.sidr_req_rcvd.port;
319  break;
321  ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
322  &evt->param.sidr_rep_rcvd);
324  uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
325  info = evt->param.sidr_rep_rcvd.info;
326  break;
327  default:
328  uvt->resp.u.send_status = evt->param.send_status;
329  break;
330  }
331 
332  if (uvt->data_len) {
333  uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
334  if (!uvt->data)
335  goto err1;
336 
337  uvt->resp.present |= IB_UCM_PRES_DATA;
338  }
339 
340  if (uvt->info_len) {
341  uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
342  if (!uvt->info)
343  goto err2;
344 
345  uvt->resp.present |= IB_UCM_PRES_INFO;
346  }
347  return 0;
348 
349 err2:
350  kfree(uvt->data);
351 err1:
352  return -ENOMEM;
353 }
354 
355 static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
356  struct ib_cm_event *event)
357 {
358  struct ib_ucm_event *uevent;
359  struct ib_ucm_context *ctx;
360  int result = 0;
361 
362  ctx = cm_id->context;
363 
364  uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
365  if (!uevent)
366  goto err1;
367 
368  uevent->ctx = ctx;
369  uevent->cm_id = cm_id;
370  uevent->resp.uid = ctx->uid;
371  uevent->resp.id = ctx->id;
372  uevent->resp.event = event->event;
373 
374  result = ib_ucm_event_process(event, uevent);
375  if (result)
376  goto err2;
377 
378  mutex_lock(&ctx->file->file_mutex);
379  list_add_tail(&uevent->file_list, &ctx->file->events);
380  list_add_tail(&uevent->ctx_list, &ctx->events);
381  wake_up_interruptible(&ctx->file->poll_wait);
382  mutex_unlock(&ctx->file->file_mutex);
383  return 0;
384 
385 err2:
386  kfree(uevent);
387 err1:
388  /* Destroy new cm_id's */
389  return ib_ucm_new_cm_id(event->event);
390 }
391 
392 static ssize_t ib_ucm_event(struct ib_ucm_file *file,
393  const char __user *inbuf,
394  int in_len, int out_len)
395 {
396  struct ib_ucm_context *ctx;
397  struct ib_ucm_event_get cmd;
398  struct ib_ucm_event *uevent;
399  int result = 0;
400 
401  if (out_len < sizeof(struct ib_ucm_event_resp))
402  return -ENOSPC;
403 
404  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
405  return -EFAULT;
406 
407  mutex_lock(&file->file_mutex);
408  while (list_empty(&file->events)) {
409  mutex_unlock(&file->file_mutex);
410 
411  if (file->filp->f_flags & O_NONBLOCK)
412  return -EAGAIN;
413 
415  !list_empty(&file->events)))
416  return -ERESTARTSYS;
417 
418  mutex_lock(&file->file_mutex);
419  }
420 
421  uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
422 
423  if (ib_ucm_new_cm_id(uevent->resp.event)) {
424  ctx = ib_ucm_ctx_alloc(file);
425  if (!ctx) {
426  result = -ENOMEM;
427  goto done;
428  }
429 
430  ctx->cm_id = uevent->cm_id;
431  ctx->cm_id->context = ctx;
432  uevent->resp.id = ctx->id;
433  }
434 
435  if (copy_to_user((void __user *)(unsigned long)cmd.response,
436  &uevent->resp, sizeof(uevent->resp))) {
437  result = -EFAULT;
438  goto done;
439  }
440 
441  if (uevent->data) {
442  if (cmd.data_len < uevent->data_len) {
443  result = -ENOMEM;
444  goto done;
445  }
446  if (copy_to_user((void __user *)(unsigned long)cmd.data,
447  uevent->data, uevent->data_len)) {
448  result = -EFAULT;
449  goto done;
450  }
451  }
452 
453  if (uevent->info) {
454  if (cmd.info_len < uevent->info_len) {
455  result = -ENOMEM;
456  goto done;
457  }
458  if (copy_to_user((void __user *)(unsigned long)cmd.info,
459  uevent->info, uevent->info_len)) {
460  result = -EFAULT;
461  goto done;
462  }
463  }
464 
465  list_del(&uevent->file_list);
466  list_del(&uevent->ctx_list);
467  uevent->ctx->events_reported++;
468 
469  kfree(uevent->data);
470  kfree(uevent->info);
471  kfree(uevent);
472 done:
473  mutex_unlock(&file->file_mutex);
474  return result;
475 }
476 
477 static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
478  const char __user *inbuf,
479  int in_len, int out_len)
480 {
481  struct ib_ucm_create_id cmd;
483  struct ib_ucm_context *ctx;
484  int result;
485 
486  if (out_len < sizeof(resp))
487  return -ENOSPC;
488 
489  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
490  return -EFAULT;
491 
492  mutex_lock(&file->file_mutex);
493  ctx = ib_ucm_ctx_alloc(file);
494  mutex_unlock(&file->file_mutex);
495  if (!ctx)
496  return -ENOMEM;
497 
498  ctx->uid = cmd.uid;
499  ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
500  ib_ucm_event_handler, ctx);
501  if (IS_ERR(ctx->cm_id)) {
502  result = PTR_ERR(ctx->cm_id);
503  goto err1;
504  }
505 
506  resp.id = ctx->id;
507  if (copy_to_user((void __user *)(unsigned long)cmd.response,
508  &resp, sizeof(resp))) {
509  result = -EFAULT;
510  goto err2;
511  }
512  return 0;
513 
514 err2:
515  ib_destroy_cm_id(ctx->cm_id);
516 err1:
517  mutex_lock(&ctx_id_mutex);
518  idr_remove(&ctx_id_table, ctx->id);
519  mutex_unlock(&ctx_id_mutex);
520  kfree(ctx);
521  return result;
522 }
523 
524 static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
525  const char __user *inbuf,
526  int in_len, int out_len)
527 {
528  struct ib_ucm_destroy_id cmd;
530  struct ib_ucm_context *ctx;
531  int result = 0;
532 
533  if (out_len < sizeof(resp))
534  return -ENOSPC;
535 
536  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
537  return -EFAULT;
538 
539  mutex_lock(&ctx_id_mutex);
540  ctx = idr_find(&ctx_id_table, cmd.id);
541  if (!ctx)
542  ctx = ERR_PTR(-ENOENT);
543  else if (ctx->file != file)
544  ctx = ERR_PTR(-EINVAL);
545  else
546  idr_remove(&ctx_id_table, ctx->id);
547  mutex_unlock(&ctx_id_mutex);
548 
549  if (IS_ERR(ctx))
550  return PTR_ERR(ctx);
551 
552  ib_ucm_ctx_put(ctx);
553  wait_for_completion(&ctx->comp);
554 
555  /* No new events will be generated after destroying the cm_id. */
556  ib_destroy_cm_id(ctx->cm_id);
557  /* Cleanup events not yet reported to the user. */
558  ib_ucm_cleanup_events(ctx);
559 
560  resp.events_reported = ctx->events_reported;
561  if (copy_to_user((void __user *)(unsigned long)cmd.response,
562  &resp, sizeof(resp)))
563  result = -EFAULT;
564 
565  kfree(ctx);
566  return result;
567 }
568 
569 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
570  const char __user *inbuf,
571  int in_len, int out_len)
572 {
573  struct ib_ucm_attr_id_resp resp;
574  struct ib_ucm_attr_id cmd;
575  struct ib_ucm_context *ctx;
576  int result = 0;
577 
578  if (out_len < sizeof(resp))
579  return -ENOSPC;
580 
581  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
582  return -EFAULT;
583 
584  ctx = ib_ucm_ctx_get(file, cmd.id);
585  if (IS_ERR(ctx))
586  return PTR_ERR(ctx);
587 
588  resp.service_id = ctx->cm_id->service_id;
589  resp.service_mask = ctx->cm_id->service_mask;
590  resp.local_id = ctx->cm_id->local_id;
591  resp.remote_id = ctx->cm_id->remote_id;
592 
593  if (copy_to_user((void __user *)(unsigned long)cmd.response,
594  &resp, sizeof(resp)))
595  result = -EFAULT;
596 
597  ib_ucm_ctx_put(ctx);
598  return result;
599 }
600 
601 static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
602  const char __user *inbuf,
603  int in_len, int out_len)
604 {
605  struct ib_uverbs_qp_attr resp;
606  struct ib_ucm_init_qp_attr cmd;
607  struct ib_ucm_context *ctx;
608  struct ib_qp_attr qp_attr;
609  int result = 0;
610 
611  if (out_len < sizeof(resp))
612  return -ENOSPC;
613 
614  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
615  return -EFAULT;
616 
617  ctx = ib_ucm_ctx_get(file, cmd.id);
618  if (IS_ERR(ctx))
619  return PTR_ERR(ctx);
620 
621  resp.qp_attr_mask = 0;
622  memset(&qp_attr, 0, sizeof qp_attr);
623  qp_attr.qp_state = cmd.qp_state;
624  result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
625  if (result)
626  goto out;
627 
628  ib_copy_qp_attr_to_user(&resp, &qp_attr);
629 
630  if (copy_to_user((void __user *)(unsigned long)cmd.response,
631  &resp, sizeof(resp)))
632  result = -EFAULT;
633 
634 out:
635  ib_ucm_ctx_put(ctx);
636  return result;
637 }
638 
639 static int ucm_validate_listen(__be64 service_id, __be64 service_mask)
640 {
641  service_id &= service_mask;
642 
643  if (((service_id & IB_CMA_SERVICE_ID_MASK) == IB_CMA_SERVICE_ID) ||
644  ((service_id & IB_SDP_SERVICE_ID_MASK) == IB_SDP_SERVICE_ID))
645  return -EINVAL;
646 
647  return 0;
648 }
649 
650 static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
651  const char __user *inbuf,
652  int in_len, int out_len)
653 {
654  struct ib_ucm_listen cmd;
655  struct ib_ucm_context *ctx;
656  int result;
657 
658  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
659  return -EFAULT;
660 
661  ctx = ib_ucm_ctx_get(file, cmd.id);
662  if (IS_ERR(ctx))
663  return PTR_ERR(ctx);
664 
665  result = ucm_validate_listen(cmd.service_id, cmd.service_mask);
666  if (result)
667  goto out;
668 
669  result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask,
670  NULL);
671 out:
672  ib_ucm_ctx_put(ctx);
673  return result;
674 }
675 
676 static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
677  const char __user *inbuf,
678  int in_len, int out_len)
679 {
680  struct ib_ucm_notify cmd;
681  struct ib_ucm_context *ctx;
682  int result;
683 
684  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
685  return -EFAULT;
686 
687  ctx = ib_ucm_ctx_get(file, cmd.id);
688  if (IS_ERR(ctx))
689  return PTR_ERR(ctx);
690 
691  result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
692  ib_ucm_ctx_put(ctx);
693  return result;
694 }
695 
696 static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
697 {
698  void *data;
699 
700  *dest = NULL;
701 
702  if (!len)
703  return 0;
704 
705  data = memdup_user((void __user *)(unsigned long)src, len);
706  if (IS_ERR(data))
707  return PTR_ERR(data);
708 
709  *dest = data;
710  return 0;
711 }
712 
713 static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
714 {
715  struct ib_user_path_rec upath;
716  struct ib_sa_path_rec *sa_path;
717 
718  *path = NULL;
719 
720  if (!src)
721  return 0;
722 
723  sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
724  if (!sa_path)
725  return -ENOMEM;
726 
727  if (copy_from_user(&upath, (void __user *)(unsigned long)src,
728  sizeof(upath))) {
729 
730  kfree(sa_path);
731  return -EFAULT;
732  }
733 
734  ib_copy_path_rec_from_user(sa_path, &upath);
735  *path = sa_path;
736  return 0;
737 }
738 
739 static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
740  const char __user *inbuf,
741  int in_len, int out_len)
742 {
743  struct ib_cm_req_param param;
744  struct ib_ucm_context *ctx;
745  struct ib_ucm_req cmd;
746  int result;
747 
748  param.private_data = NULL;
749  param.primary_path = NULL;
750  param.alternate_path = NULL;
751 
752  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
753  return -EFAULT;
754 
755  result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
756  if (result)
757  goto done;
758 
759  result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
760  if (result)
761  goto done;
762 
763  result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
764  if (result)
765  goto done;
766 
767  param.private_data_len = cmd.len;
768  param.service_id = cmd.sid;
769  param.qp_num = cmd.qpn;
770  param.qp_type = cmd.qp_type;
771  param.starting_psn = cmd.psn;
772  param.peer_to_peer = cmd.peer_to_peer;
773  param.responder_resources = cmd.responder_resources;
774  param.initiator_depth = cmd.initiator_depth;
775  param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
776  param.flow_control = cmd.flow_control;
777  param.local_cm_response_timeout = cmd.local_cm_response_timeout;
778  param.retry_count = cmd.retry_count;
779  param.rnr_retry_count = cmd.rnr_retry_count;
780  param.max_cm_retries = cmd.max_cm_retries;
781  param.srq = cmd.srq;
782 
783  ctx = ib_ucm_ctx_get(file, cmd.id);
784  if (!IS_ERR(ctx)) {
785  result = ib_send_cm_req(ctx->cm_id, &param);
786  ib_ucm_ctx_put(ctx);
787  } else
788  result = PTR_ERR(ctx);
789 
790 done:
791  kfree(param.private_data);
792  kfree(param.primary_path);
793  kfree(param.alternate_path);
794  return result;
795 }
796 
797 static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
798  const char __user *inbuf,
799  int in_len, int out_len)
800 {
801  struct ib_cm_rep_param param;
802  struct ib_ucm_context *ctx;
803  struct ib_ucm_rep cmd;
804  int result;
805 
806  param.private_data = NULL;
807 
808  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
809  return -EFAULT;
810 
811  result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
812  if (result)
813  return result;
814 
815  param.qp_num = cmd.qpn;
816  param.starting_psn = cmd.psn;
817  param.private_data_len = cmd.len;
818  param.responder_resources = cmd.responder_resources;
819  param.initiator_depth = cmd.initiator_depth;
820  param.failover_accepted = cmd.failover_accepted;
821  param.flow_control = cmd.flow_control;
822  param.rnr_retry_count = cmd.rnr_retry_count;
823  param.srq = cmd.srq;
824 
825  ctx = ib_ucm_ctx_get(file, cmd.id);
826  if (!IS_ERR(ctx)) {
827  ctx->uid = cmd.uid;
828  result = ib_send_cm_rep(ctx->cm_id, &param);
829  ib_ucm_ctx_put(ctx);
830  } else
831  result = PTR_ERR(ctx);
832 
833  kfree(param.private_data);
834  return result;
835 }
836 
837 static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
838  const char __user *inbuf, int in_len,
839  int (*func)(struct ib_cm_id *cm_id,
840  const void *private_data,
841  u8 private_data_len))
842 {
843  struct ib_ucm_private_data cmd;
844  struct ib_ucm_context *ctx;
845  const void *private_data = NULL;
846  int result;
847 
848  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
849  return -EFAULT;
850 
851  result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
852  if (result)
853  return result;
854 
855  ctx = ib_ucm_ctx_get(file, cmd.id);
856  if (!IS_ERR(ctx)) {
857  result = func(ctx->cm_id, private_data, cmd.len);
858  ib_ucm_ctx_put(ctx);
859  } else
860  result = PTR_ERR(ctx);
861 
862  kfree(private_data);
863  return result;
864 }
865 
866 static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
867  const char __user *inbuf,
868  int in_len, int out_len)
869 {
870  return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
871 }
872 
873 static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
874  const char __user *inbuf,
875  int in_len, int out_len)
876 {
877  return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
878 }
879 
880 static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
881  const char __user *inbuf,
882  int in_len, int out_len)
883 {
884  return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
885 }
886 
887 static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
888  const char __user *inbuf, int in_len,
889  int (*func)(struct ib_cm_id *cm_id,
890  int status,
891  const void *info,
892  u8 info_len,
893  const void *data,
894  u8 data_len))
895 {
896  struct ib_ucm_context *ctx;
897  struct ib_ucm_info cmd;
898  const void *data = NULL;
899  const void *info = NULL;
900  int result;
901 
902  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
903  return -EFAULT;
904 
905  result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
906  if (result)
907  goto done;
908 
909  result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
910  if (result)
911  goto done;
912 
913  ctx = ib_ucm_ctx_get(file, cmd.id);
914  if (!IS_ERR(ctx)) {
915  result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
916  data, cmd.data_len);
917  ib_ucm_ctx_put(ctx);
918  } else
919  result = PTR_ERR(ctx);
920 
921 done:
922  kfree(data);
923  kfree(info);
924  return result;
925 }
926 
927 static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
928  const char __user *inbuf,
929  int in_len, int out_len)
930 {
931  return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
932 }
933 
934 static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
935  const char __user *inbuf,
936  int in_len, int out_len)
937 {
938  return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
939 }
940 
941 static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
942  const char __user *inbuf,
943  int in_len, int out_len)
944 {
945  struct ib_ucm_context *ctx;
946  struct ib_ucm_mra cmd;
947  const void *data = NULL;
948  int result;
949 
950  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
951  return -EFAULT;
952 
953  result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
954  if (result)
955  return result;
956 
957  ctx = ib_ucm_ctx_get(file, cmd.id);
958  if (!IS_ERR(ctx)) {
959  result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
960  ib_ucm_ctx_put(ctx);
961  } else
962  result = PTR_ERR(ctx);
963 
964  kfree(data);
965  return result;
966 }
967 
968 static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
969  const char __user *inbuf,
970  int in_len, int out_len)
971 {
972  struct ib_ucm_context *ctx;
973  struct ib_sa_path_rec *path = NULL;
974  struct ib_ucm_lap cmd;
975  const void *data = NULL;
976  int result;
977 
978  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
979  return -EFAULT;
980 
981  result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
982  if (result)
983  goto done;
984 
985  result = ib_ucm_path_get(&path, cmd.path);
986  if (result)
987  goto done;
988 
989  ctx = ib_ucm_ctx_get(file, cmd.id);
990  if (!IS_ERR(ctx)) {
991  result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
992  ib_ucm_ctx_put(ctx);
993  } else
994  result = PTR_ERR(ctx);
995 
996 done:
997  kfree(data);
998  kfree(path);
999  return result;
1000 }
1001 
1002 static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
1003  const char __user *inbuf,
1004  int in_len, int out_len)
1005 {
1006  struct ib_cm_sidr_req_param param;
1007  struct ib_ucm_context *ctx;
1008  struct ib_ucm_sidr_req cmd;
1009  int result;
1010 
1011  param.private_data = NULL;
1012  param.path = NULL;
1013 
1014  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1015  return -EFAULT;
1016 
1017  result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
1018  if (result)
1019  goto done;
1020 
1021  result = ib_ucm_path_get(&param.path, cmd.path);
1022  if (result)
1023  goto done;
1024 
1025  param.private_data_len = cmd.len;
1026  param.service_id = cmd.sid;
1027  param.timeout_ms = cmd.timeout;
1028  param.max_cm_retries = cmd.max_cm_retries;
1029 
1030  ctx = ib_ucm_ctx_get(file, cmd.id);
1031  if (!IS_ERR(ctx)) {
1032  result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1033  ib_ucm_ctx_put(ctx);
1034  } else
1035  result = PTR_ERR(ctx);
1036 
1037 done:
1038  kfree(param.private_data);
1039  kfree(param.path);
1040  return result;
1041 }
1042 
1043 static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1044  const char __user *inbuf,
1045  int in_len, int out_len)
1046 {
1047  struct ib_cm_sidr_rep_param param;
1048  struct ib_ucm_sidr_rep cmd;
1049  struct ib_ucm_context *ctx;
1050  int result;
1051 
1052  param.info = NULL;
1053 
1054  if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1055  return -EFAULT;
1056 
1057  result = ib_ucm_alloc_data(&param.private_data,
1058  cmd.data, cmd.data_len);
1059  if (result)
1060  goto done;
1061 
1062  result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
1063  if (result)
1064  goto done;
1065 
1066  param.qp_num = cmd.qpn;
1067  param.qkey = cmd.qkey;
1068  param.status = cmd.status;
1069  param.info_length = cmd.info_len;
1070  param.private_data_len = cmd.data_len;
1071 
1072  ctx = ib_ucm_ctx_get(file, cmd.id);
1073  if (!IS_ERR(ctx)) {
1074  result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1075  ib_ucm_ctx_put(ctx);
1076  } else
1077  result = PTR_ERR(ctx);
1078 
1079 done:
1080  kfree(param.private_data);
1081  kfree(param.info);
1082  return result;
1083 }
1084 
1085 static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
1086  const char __user *inbuf,
1087  int in_len, int out_len) = {
1093  [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
1094  [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
1095  [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
1096  [IB_USER_CM_CMD_SEND_DREQ] = ib_ucm_send_dreq,
1097  [IB_USER_CM_CMD_SEND_DREP] = ib_ucm_send_drep,
1098  [IB_USER_CM_CMD_SEND_REJ] = ib_ucm_send_rej,
1099  [IB_USER_CM_CMD_SEND_MRA] = ib_ucm_send_mra,
1100  [IB_USER_CM_CMD_SEND_LAP] = ib_ucm_send_lap,
1101  [IB_USER_CM_CMD_SEND_APR] = ib_ucm_send_apr,
1102  [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
1103  [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
1106 };
1107 
1108 static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1109  size_t len, loff_t *pos)
1110 {
1111  struct ib_ucm_file *file = filp->private_data;
1112  struct ib_ucm_cmd_hdr hdr;
1113  ssize_t result;
1114 
1115  if (len < sizeof(hdr))
1116  return -EINVAL;
1117 
1118  if (copy_from_user(&hdr, buf, sizeof(hdr)))
1119  return -EFAULT;
1120 
1121  if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
1122  return -EINVAL;
1123 
1124  if (hdr.in + sizeof(hdr) > len)
1125  return -EINVAL;
1126 
1127  result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
1128  hdr.in, hdr.out);
1129  if (!result)
1130  result = len;
1131 
1132  return result;
1133 }
1134 
1135 static unsigned int ib_ucm_poll(struct file *filp,
1136  struct poll_table_struct *wait)
1137 {
1138  struct ib_ucm_file *file = filp->private_data;
1139  unsigned int mask = 0;
1140 
1141  poll_wait(filp, &file->poll_wait, wait);
1142 
1143  if (!list_empty(&file->events))
1144  mask = POLLIN | POLLRDNORM;
1145 
1146  return mask;
1147 }
1148 
1149 /*
1150  * ib_ucm_open() does not need the BKL:
1151  *
1152  * - no global state is referred to;
1153  * - there is no ioctl method to race against;
1154  * - no further module initialization is required for open to work
1155  * after the device is registered.
1156  */
1157 static int ib_ucm_open(struct inode *inode, struct file *filp)
1158 {
1159  struct ib_ucm_file *file;
1160 
1161  file = kmalloc(sizeof(*file), GFP_KERNEL);
1162  if (!file)
1163  return -ENOMEM;
1164 
1165  INIT_LIST_HEAD(&file->events);
1166  INIT_LIST_HEAD(&file->ctxs);
1168 
1169  mutex_init(&file->file_mutex);
1170 
1171  filp->private_data = file;
1172  file->filp = filp;
1173  file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
1174 
1175  return nonseekable_open(inode, filp);
1176 }
1177 
1178 static int ib_ucm_close(struct inode *inode, struct file *filp)
1179 {
1180  struct ib_ucm_file *file = filp->private_data;
1181  struct ib_ucm_context *ctx;
1182 
1183  mutex_lock(&file->file_mutex);
1184  while (!list_empty(&file->ctxs)) {
1185  ctx = list_entry(file->ctxs.next,
1186  struct ib_ucm_context, file_list);
1187  mutex_unlock(&file->file_mutex);
1188 
1189  mutex_lock(&ctx_id_mutex);
1190  idr_remove(&ctx_id_table, ctx->id);
1191  mutex_unlock(&ctx_id_mutex);
1192 
1193  ib_destroy_cm_id(ctx->cm_id);
1194  ib_ucm_cleanup_events(ctx);
1195  kfree(ctx);
1196 
1197  mutex_lock(&file->file_mutex);
1198  }
1199  mutex_unlock(&file->file_mutex);
1200  kfree(file);
1201  return 0;
1202 }
1203 
1204 static void ib_ucm_release_dev(struct device *dev)
1205 {
1206  struct ib_ucm_device *ucm_dev;
1207 
1208  ucm_dev = container_of(dev, struct ib_ucm_device, dev);
1209  cdev_del(&ucm_dev->cdev);
1210  if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1211  clear_bit(ucm_dev->devnum, dev_map);
1212  else
1213  clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
1214  kfree(ucm_dev);
1215 }
1216 
/* File operations for the /dev/infiniband/ucmN character devices. */
static const struct file_operations ucm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_ucm_open,
	.release = ib_ucm_close,
	.write	 = ib_ucm_write,
	.poll	 = ib_ucm_poll,
	.llseek	 = no_llseek,	/* device is not seekable */
};
1225 
1226 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
1227  char *buf)
1228 {
1229  struct ib_ucm_device *ucm_dev;
1230 
1231  ucm_dev = container_of(dev, struct ib_ucm_device, dev);
1232  return sprintf(buf, "%s\n", ucm_dev->ib_dev->name);
1233 }
1234 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1235 
1236 static dev_t overflow_maj;
1237 static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1238 static int find_overflow_devnum(void)
1239 {
1240  int ret;
1241 
1242  if (!overflow_maj) {
1243  ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
1244  "infiniband_cm");
1245  if (ret) {
1246  printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
1247  return ret;
1248  }
1249  }
1250 
1251  ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
1252  if (ret >= IB_UCM_MAX_DEVICES)
1253  return -1;
1254 
1255  return ret;
1256 }
1257 
1258 static void ib_ucm_add_one(struct ib_device *device)
1259 {
1260  int devnum;
1261  dev_t base;
1262  struct ib_ucm_device *ucm_dev;
1263 
1264  if (!device->alloc_ucontext ||
1266  return;
1267 
1268  ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
1269  if (!ucm_dev)
1270  return;
1271 
1272  ucm_dev->ib_dev = device;
1273 
1274  devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
1275  if (devnum >= IB_UCM_MAX_DEVICES) {
1276  devnum = find_overflow_devnum();
1277  if (devnum < 0)
1278  goto err;
1279 
1280  ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
1281  base = devnum + overflow_maj;
1282  set_bit(devnum, overflow_map);
1283  } else {
1284  ucm_dev->devnum = devnum;
1285  base = devnum + IB_UCM_BASE_DEV;
1286  set_bit(devnum, dev_map);
1287  }
1288 
1289  cdev_init(&ucm_dev->cdev, &ucm_fops);
1290  ucm_dev->cdev.owner = THIS_MODULE;
1291  kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
1292  if (cdev_add(&ucm_dev->cdev, base, 1))
1293  goto err;
1294 
1295  ucm_dev->dev.class = &cm_class;
1296  ucm_dev->dev.parent = device->dma_device;
1297  ucm_dev->dev.devt = ucm_dev->cdev.dev;
1298  ucm_dev->dev.release = ib_ucm_release_dev;
1299  dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
1300  if (device_register(&ucm_dev->dev))
1301  goto err_cdev;
1302 
1303  if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev))
1304  goto err_dev;
1305 
1306  ib_set_client_data(device, &ucm_client, ucm_dev);
1307  return;
1308 
1309 err_dev:
1310  device_unregister(&ucm_dev->dev);
1311 err_cdev:
1312  cdev_del(&ucm_dev->cdev);
1313  if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1314  clear_bit(devnum, dev_map);
1315  else
1316  clear_bit(devnum, overflow_map);
1317 err:
1318  kfree(ucm_dev);
1319  return;
1320 }
1321 
1322 static void ib_ucm_remove_one(struct ib_device *device)
1323 {
1324  struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
1325 
1326  if (!ucm_dev)
1327  return;
1328 
1329  device_unregister(&ucm_dev->dev);
1330 }
1331 
1332 static CLASS_ATTR_STRING(abi_version, S_IRUGO,
1334 
1335 static int __init ib_ucm_init(void)
1336 {
1337  int ret;
1338 
1340  "infiniband_cm");
1341  if (ret) {
1342  printk(KERN_ERR "ucm: couldn't register device number\n");
1343  goto error1;
1344  }
1345 
1346  ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
1347  if (ret) {
1348  printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
1349  goto error2;
1350  }
1351 
1352  ret = ib_register_client(&ucm_client);
1353  if (ret) {
1354  printk(KERN_ERR "ucm: couldn't register client\n");
1355  goto error3;
1356  }
1357  return 0;
1358 
1359 error3:
1360  class_remove_file(&cm_class, &class_attr_abi_version.attr);
1361 error2:
1363 error1:
1364  return ret;
1365 }
1366 
1367 static void __exit ib_ucm_cleanup(void)
1368 {
1369  ib_unregister_client(&ucm_client);
1370  class_remove_file(&cm_class, &class_attr_abi_version.attr);
1372  if (overflow_maj)
1374  idr_destroy(&ctx_id_table);
1375 }
1376 
/* Wire the init/exit handlers into the module lifecycle. */
module_init(ib_ucm_init);
module_exit(ib_ucm_cleanup);