#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#define C2_MIN_PAGESIZE 1024

#define C2_MAX_MRS     32768
#define C2_MAX_QPS     16000
#define C2_MAX_WQE_SZ  256
#define C2_MAX_QP_WR   ((128*1024)/C2_MAX_WQE_SZ)
#define C2_MAX_SGE_RD  1
#define C2_MAX_CQS     32768
#define C2_MAX_CQES    4096
#define C2_MAX_PDS     16384
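/*
 * These appear to be the driver's compile-time caps on adapter resources
 * (memory regions, QPs, CQs, CQ entries, PDs).  With C2_MAX_WQE_SZ = 256,
 * C2_MAX_QP_WR works out to (128*1024)/256 = 512 work requests per QP.
 */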
static int c2_adapter_init(struct c2_dev *c2dev)

        memset(&wr, 0, sizeof(wr));

static void c2_adapter_term(struct c2_dev *c2dev)

        wr.hdr.context = (unsigned long) vq_req;

        err = c2_errno(reply);

        wr->hdr.context = (unsigned long) vq_req;

        err = c2_errno(reply);

        wr->hdr.context = (unsigned long) vq_req;

        err = c2_errno(reply);
static int c2_rnic_open(struct c2_dev *c2dev)

        if (vq_req == NULL) {

        wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);

        wr.rnic_open.req.user_context = (unsigned long) c2dev;

        if ((err = c2_errno(reply)) != 0) {
static int c2_rnic_close(struct c2_dev *c2dev)

        if (vq_req == NULL) {

        wr.rnic_close.req.hdr.context = (unsigned long) vq_req;

        if ((err = c2_errno(reply)) != 0) {
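/*
 * The fragments above all share one verbs-queue round trip: build a work
 * request, stash the vq_req pointer in hdr.context so the reply can be
 * matched to the waiter, post it, sleep for the reply, and convert the
 * adapter's status with c2_errno().  Below is a minimal sketch of that shape,
 * assuming the vq_req_alloc()/vq_req_get()/vq_req_put()/vq_send_wr()/
 * vq_wait_for_reply()/vq_repbuf_free() helpers and the vq_req->reply_msg
 * field from the driver's c2_vq layer (c2.h/c2_vq.h); the function name,
 * the CCWR_RNIC_OPEN opcode and the reply handling are illustrative only,
 * not the file's exact code.
 */
static int c2_vq_round_trip_sketch(struct c2_dev *c2dev)
{
        struct c2_vq_req *vq_req;
        union c2wr wr;
        void *reply;
        int err;

        vq_req = vq_req_alloc(c2dev);           /* per-request wait context */
        if (vq_req == NULL)
                return -ENOMEM;

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_RNIC_OPEN);      /* illustrative opcode */
        wr.hdr.context = (unsigned long) vq_req; /* echoed back in the reply */

        vq_req_get(c2dev, vq_req);              /* hold a ref across the send */
        err = vq_send_wr(c2dev, &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail;
        }

        err = vq_wait_for_reply(c2dev, vq_req); /* sleep until the reply lands */
        if (err)
                goto bail;

        reply = (void *) (unsigned long) vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail;
        }

        err = c2_errno(reply);                  /* adapter status -> -errno */
        vq_repbuf_free(c2dev, reply);
bail:
        vq_req_free(c2dev, vq_req);
        return err;
}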
        &c2dev->req_vq.shared_dma,

        &c2dev->rep_vq.shared_dma,

        !c2dev->rep_vq.shared || !c2dev->aeq.shared) {

        mmio_regs = c2dev->kva;
        pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
                 (unsigned long long) c2dev->rep_vq.host_dma);

        pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
                 (unsigned long long) c2dev->aeq.host_dma);
        err = c2_adapter_init(c2dev);

        err = c2_rnic_open(c2dev);

        if (c2_rnic_query(c2dev, &c2dev->props))

        c2_rnic_close(c2dev);

        c2dev->aeq.q_size * c2dev->aeq.msg_size,

        c2_rnic_close(c2dev);

        c2_adapter_term(c2dev);
        c2dev->aeq.q_size * c2dev->aeq.msg_size,
        c2dev->aeq.msg_pool.host,

        c2dev->rep_vq.msg_pool.host,
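/*
 * Taken together, the final fragments suggest bring-up runs c2_adapter_init(),
 * then c2_rnic_open(), then c2_rnic_query() to fill c2dev->props, while
 * teardown reverses it: close the RNIC instance, terminate the adapter, and
 * only then free the AEQ and reply verbs-queue message pools
 * (aeq.msg_pool.host, rep_vq.msg_pool.host).
 */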