ib_verbs.h

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: 	  return -1;
	}
}
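
/*
 * Example (illustrative sketch, not part of the original header):
 * converting a port's negotiated MTU to a byte count with the helper
 * above.  "ibdev" and "port" are placeholders supplied by the caller;
 * ib_query_port() is declared later in this file.
 */
#if 0
static int example_port_mtu_bytes(struct ib_device *ibdev, u8 port)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ibdev, port, &attr);
	if (ret)
		return ret;
	return ib_mtu_enum_to_int(attr.active_mtu);	/* e.g. 2048 */
}
#endif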

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default: 	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	/* ... */
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
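
/*
 * Example (illustrative sketch, not part of the original header):
 * initializing a handler with INIT_IB_EVENT_HANDLER() and registering
 * it via ib_register_event_handler(), declared later in this file.
 * The handler body is a placeholder.
 */
#if 0
static void example_event_handler(struct ib_event_handler *handler,
				  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("%s: port %d is active\n",
			event->device->name, event->element.port_num);
}

static int example_watch_events(struct ib_device *ibdev)
{
	static struct ib_event_handler eh;

	INIT_IB_EVENT_HANDLER(&eh, ibdev, example_event_handler);
	return ib_register_event_handler(&eh);
}
#endif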

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
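
/*
 * Example (illustrative sketch, not part of the original header): per
 * the comment above, a single mask test classifies a completion as a
 * receive, covering both IB_WC_RECV and IB_WC_RECV_RDMA_WITH_IMM.
 */
#if 0
static bool example_wc_is_recv(const struct ib_wc *wc)
{
	return wc->opcode & IB_WC_RECV;
}
#endif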

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int     hlen;
			int     mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	atomic_t		usecnt; /* count times opened, mcast attaches */
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client  (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
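
/*
 * Example (illustrative sketch, not part of the original header): how
 * a low-level driver might use the two udata helpers above to exchange
 * a private command/response pair with userspace.  The structure
 * layouts are placeholders.
 */
#if 0
struct example_cmd  { __u64 buf_addr; };
struct example_resp { __u32 handle;   };

static int example_handle_cmd(struct ib_udata *udata)
{
	struct example_cmd  cmd;
	struct example_resp resp = { .handle = 42 };

	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
		return -EFAULT;
	/* ... act on cmd.buf_addr ... */
	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}
#endif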

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

struct ib_pd *ib_alloc_pd(struct ib_device *device);

int ib_dealloc_pd(struct ib_pd *pd);

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

int ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

int ib_destroy_srq(struct ib_srq *srq);

static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
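
/*
 * Example (illustrative sketch, not part of the original header): the
 * first step of activating an RC QP, moving it from RESET to INIT.
 * The mask must name exactly the attributes being set; the pkey index,
 * port number and access flags here are placeholder values.
 */
#if 0
static int example_qp_to_init(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
#endif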

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

int ib_destroy_qp(struct ib_qp *qp);

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

int ib_close_qp(struct ib_qp *qp);

static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
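
/*
 * Example (illustrative sketch, not part of the original header):
 * posting one signaled SEND of an already-registered buffer.  "addr",
 * "len" and "lkey" are placeholders describing a registered region.
 */
#if 0
static int example_post_send(struct ib_qp *qp, u64 addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id	    = 1,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	}, *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}
#endif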

static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

int ib_resize_cq(struct ib_cq *cq, int cqe);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

int ib_destroy_cq(struct ib_cq *cq);

static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
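
/*
 * Example (illustrative sketch, not part of the original header): a
 * common completion-handler idiom is to re-arm the CQ first and then
 * drain it, so completions that arrive between the final poll and the
 * re-arm are not missed.
 */
#if 0
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			pr_err("wr_id %llu failed with status %d\n",
			       (unsigned long long)wc.wr_id, wc.status);
		/* ... complete the work request identified by wc.wr_id ... */
	}
}
#endif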

static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dev->dma_device, dma_addr);
}

static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
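
/*
 * Example (illustrative sketch, not part of the original header):
 * mapping a kernel buffer for DMA through the wrappers above and
 * checking the result before handing the address to the device.
 */
#if 0
static u64 example_map_buffer(struct ib_device *ibdev, void *buf, size_t len)
{
	u64 dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(ibdev, dma_addr))
		return 0;	/* caller treats 0 as "mapping failed" here */
	return dma_addr;
}
#endif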

static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
					  void *cpu_addr, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
				    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
					     u64 addr, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	return dma_unmap_single_attrs(dev->dma_device, addr, size,
				      direction, attrs);
}

static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}

static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
			struct ib_device *device, int page_list_len);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
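
/*
 * Example (illustrative sketch, not part of the original header):
 * refreshing the key portion of a fast-reg MR before it is reused, as
 * the helper above intends; any 8-bit counter works as the new key.
 */
#if 0
static void example_refresh_mr_key(struct ib_mr *mr, u8 *key_counter)
{
	ib_update_fast_reg_key(mr, (*key_counter)++);
}
#endif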

struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

int ib_dealloc_mw(struct ib_mw *mw);

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

int ib_unmap_fmr(struct list_head *fmr_list);

int ib_dealloc_fmr(struct ib_fmr *fmr);

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

#endif /* IB_VERBS_H */