39 #if !defined(IB_VERBS_H)
42 #include <linux/types.h>
43 #include <linux/device.h>
47 #include <linux/list.h>
53 #include <asm/uaccess.h>
177 static inline int ib_mtu_enum_to_int(
enum ib_mtu mtu)
/*
 * Initialize an ib_event_handler: bind it to a device and callback and
 * prepare its list linkage.  Wrapped in do { } while (0) so the macro is
 * safe as a single statement (the visible lines all end in '\', showing
 * the continuation lines existed before extraction dropped them).
 */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
/* The permissive LID (all ones), stored in network byte order. */
#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
/* Maximum length of an IB device name, including the NUL terminator. */
#define IB_DEVICE_NAME_MAX	64
/*
 * NOTE(review): the lines below are a garbled fragment of the large
 * struct ib_device definition (parameter lists of function-pointer
 * members such as modify_device, the MR registration hooks, and
 * process_mad) plus the port_callback argument of ib_register_device.
 * Almost all surrounding lines were lost in extraction, so the
 * fragment is left byte-identical rather than reconstructed from
 * guesswork -- restore from the upstream header.
 */
1110 int device_modify_mask,
1179 int mr_access_flags);
1183 int mr_access_flags,
1188 int mr_access_flags,
1194 int max_page_list_len);
1203 int mr_access_flags,
1211 int mr_access_flags,
1225 int process_mad_flags,
1227 struct ib_wc *in_wc,
1271 int (*port_callback)(
struct ib_device *,
1282 static inline int ib_copy_from_udata(
void *
dest,
struct ib_udata *udata,
size_t len)
1287 static inline int ib_copy_to_udata(
struct ib_udata *udata,
void *
src,
size_t len)
/*
 * NOTE(review): only the device_modify_mask parameter line survived
 * extraction; the prototype is reconstructed from the verbs API it
 * unambiguously matches -- verify against upstream.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);
1475 static inline int ib_post_srq_recv(
struct ib_srq *srq,
1479 return srq->
device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1560 static inline int ib_post_send(
struct ib_qp *qp,
1564 return qp->
device->post_send(qp, send_wr, bad_send_wr);
1575 static inline int ib_post_recv(
struct ib_qp *qp,
1579 return qp->
device->post_recv(qp, recv_wr, bad_recv_wr);
1641 return cq->
device->poll_cq(cq, num_entries, wc);
1683 static inline int ib_req_notify_cq(
struct ib_cq *cq,
1686 return cq->
device->req_notify_cq(cq, flags);
1696 static inline int ib_req_ncomp_notif(
struct ib_cq *cq,
int wc_cnt)
1698 return cq->
device->req_ncomp_notif ?
1699 cq->
device->req_ncomp_notif(cq, wc_cnt) :
1723 return dev->
dma_ops->mapping_error(dev, dma_addr);
1734 static inline u64 ib_dma_map_single(
struct ib_device *dev,
1739 return dev->
dma_ops->map_single(dev, cpu_addr, size, direction);
1750 static inline void ib_dma_unmap_single(
struct ib_device *dev,
1755 dev->
dma_ops->unmap_single(dev, addr, size, direction);
1760 static inline u64 ib_dma_map_single_attrs(
struct ib_device *dev,
1761 void *cpu_addr,
size_t size,
1769 static inline void ib_dma_unmap_single_attrs(
struct ib_device *dev,
1770 u64 addr,
size_t size,
1786 static inline u64 ib_dma_map_page(
struct ib_device *dev,
1793 return dev->
dma_ops->map_page(dev, page, offset, size, direction);
1804 static inline void ib_dma_unmap_page(
struct ib_device *dev,
1805 u64 addr,
size_t size,
1809 dev->
dma_ops->unmap_page(dev, addr, size, direction);
1821 static inline int ib_dma_map_sg(
struct ib_device *dev,
1826 return dev->
dma_ops->map_sg(dev, sg, nents, direction);
1837 static inline void ib_dma_unmap_sg(
struct ib_device *dev,
1842 dev->
dma_ops->unmap_sg(dev, sg, nents, direction);
1847 static inline int ib_dma_map_sg_attrs(
struct ib_device *dev,
1855 static inline void ib_dma_unmap_sg_attrs(
struct ib_device *dev,
1867 static inline u64 ib_sg_dma_address(
struct ib_device *dev,
1871 return dev->
dma_ops->dma_address(dev, sg);
1880 static inline unsigned int ib_sg_dma_len(
struct ib_device *dev,
1884 return dev->
dma_ops->dma_len(dev, sg);
1895 static inline void ib_dma_sync_single_for_cpu(
struct ib_device *dev,
1901 dev->
dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1913 static inline void ib_dma_sync_single_for_device(
struct ib_device *dev,
1919 dev->
dma_ops->sync_single_for_device(dev, addr, size, dir);
1931 static inline void *ib_dma_alloc_coherent(
struct ib_device *dev,
1937 return dev->
dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1955 static inline void ib_dma_free_coherent(
struct ib_device *dev,
1956 size_t size,
void *cpu_addr,
1960 dev->
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
/*
 * NOTE(review): garbled fragments of several memory-registration
 * prototypes (physical MR registration/re-registration and the
 * fast-register page-list allocator, judging by the parameter names and
 * original line numbers).  Too little survived extraction to
 * reconstruct them safely; left byte-identical -- restore from the
 * upstream header.
 */
1978 int mr_access_flags,
2008 int mr_access_flags,
2052 struct ib_device *device,
int page_list_len);
2067 static inline void ib_update_fast_reg_key(
struct ib_mr *mr,
u8 newkey)
2069 mr->
lkey = (mr->
lkey & 0xffffff00) | newkey;
2070 mr->
rkey = (mr->
rkey & 0xffffff00) | newkey;
2088 static inline int ib_bind_mw(
struct ib_qp *qp,
2093 return mw->
device->bind_mw ?
2094 mw->
device->bind_mw(qp, mw, mw_bind) :
/*
 * NOTE(review): lone surviving parameter line of an FMR-related
 * prototype (likely the FMR allocation call, given the original line
 * number precedes ib_map_phys_fmr).  Left byte-identical -- restore
 * from the upstream header.
 */
2114 int mr_access_flags,
2124 static inline int ib_map_phys_fmr(
struct ib_fmr *
fmr,
2128 return fmr->
device->map_phys_fmr(fmr, page_list, list_len, iova);