#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/reboot.h>
#include <asm/kexec.h>
#include <linux/prefetch.h>
static int msg_level = -1;
static int use_mcs = 1;
static int prop_carrier_state;
77 "port to stack. 1:yes, 0:no. Default = 0 ");
79 "[2^x - 1], x = [6..14]. Default = "
82 "[2^x - 1], x = [6..14]. Default = "
85 "[2^x - 1], x = [6..14]. Default = "
88 "[2^x - 1], x = [6..14]. Default = "
90 MODULE_PARM_DESC(use_mcs,
" Multiple receive queues, 1: enable, 0: disable, "
static int port_name_cnt;
static unsigned long ehea_driver_flags;
		.compatible = "IBM,lhea",

static struct of_platform_driver ehea_driver = {
	.driver = {
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
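/*
 * Snapshot all firmware resource handles (the per-port QPs and EQs, the
 * adapter NEQ and MR) into one flat array. ehea_crash_handler() walks
 * this copy to free the resources on kexec/crash, when the regular
 * teardown paths can no longer run.
 */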
static void ehea_update_firmware_handles(void)
{
	int num_adapters = 0;
	int num_fw_handles, k, l;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
	if (num_adapters == 0)

				arr[i++].fwh = pr->qp->fw_handle;

				arr[i++].fwh = pr->eq->fw_handle;

			arr[i++].fwh = port->qp_eq->fw_handle;

		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i++].fwh = adapter->mr.handle;

	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
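/*
 * Same scheme for broadcast/multicast registrations: keep a flat copy
 * under ehea_bcmc_regs.lock so the crash handler can deregister them
 * without walking the port structures. Each address counts twice
 * because it is registered both tagged and for VLANs (hence the
 * "+= 2" / "-= 2" bookkeeping).
 */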
static void ehea_update_bcmc_registrations(void)
{
	int num_registrations = 0;

	for (k = 0; k < EHEA_MAX_PORTS; k++) {

		num_registrations += 2;

			num_registrations += 2;

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);

	if (num_registrations == 0)

			num_registrations -= 2;

		if (num_registrations == 0)

			num_registrations -= 2;

	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;

	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
	struct ehea_port *port = netdev_priv(dev);

		rx_packets += port->port_res[i].rx_packets;

		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");

	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");

		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");

		index &= max_index_mask;

	ehea_update_rq1a(pr->qp, adder);
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");

	for (i = 0; i < nr_rq1a; i++) {
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");

	ehea_update_rq1a(pr->qp, i - 1);
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	int i, index, max_index_mask, fill_wqes;

	fill_wqes = q_skba->os_skbs + num_wqes;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {

		skb = netdev_alloc_skb_ip_align(dev, packet_size);

			netdev_info(pr->port->netdev,
				    "rq%i ran dry - no mem for skb\n", rq_nr);

		if (tmp_addr == -1) {

		rwqe = ehea_get_next_rwqe(qp, rq_nr);

		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;

		index &= max_index_mask;

	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
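/* RQ2 and RQ3 share the refill helper above; only the skb array, the RQ
 * number, the WQE type and the buffer size differ. */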
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,

		pref = (skb_array[x]->data);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)

		pref = (skb_array[x]->data);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,

	} else if (rq == 3) {

		pr_err("Critical receive error for QP %d. Resetting port.\n",
		       pr->qp->init_attr.qp_nr);

		ehea_schedule_port_reset(pr->port);
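/*
 * NAPI receive processing: RQ1 completions carry small frames as
 * immediate data in the CQE itself (copied out at offset 64 below),
 * while RQ2/RQ3 completions reference pre-posted skbs that are looked
 * up by index in the per-queue arrays.
 */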
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr, int budget)

	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {

		last_wqe_index = wqe_index;

		if (!ehea_check_cqe(cqe, &rq)) {
				skb = get_skb_by_index_ll(skb_arr_rq1,
					   "LL rq1: skb=NULL\n");

				skb = netdev_alloc_skb(dev,
					netdev_err(dev, "Not enough memory to allocate skb\n");

				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);

				ehea_fill_skb(dev, skb, cqe, pr);

				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);

				ehea_fill_skb(dev, skb, cqe, pr);

			processed_bytes += skb->len;

				__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,

		cqe = ehea_poll_rq1(qp, &wqe_index);

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);
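/*
 * check_sqs() posts a marker SWQE (SWQE_RESTART_CHECK) on each send
 * queue; if a marker never completes, hardware and software queues are
 * assumed to be out of sync and the port is reset.
 */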
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)

static void check_sqs(struct ehea_port *port)

		swqe = ehea_get_swqe(pr->qp, &swqe_index);

		ehea_post_swqe(pr->qp, swqe);

			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
	int quota = my_quota;

				    pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

			pr_err("Bad send completion status=0x%04X\n",

				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);

		cqe = ehea_poll_cq(send_cq);

	ehea_update_feca(send_cq, cqe_counter);

	if (unlikely(netif_tx_queue_stopped(txq) &&

		if (netif_tx_queue_stopped(txq) &&

			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
#define EHEA_POLL_MAX_CQES 65535

	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)

		if (!napi_reschedule(napi))

		rx += ehea_proc_rwqes(dev, pr, budget - rx);
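/* For netconsole and friends: polling simply schedules every queue's
 * NAPI context. */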
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

		napi_schedule(&port->port_res[i].napi);

	napi_schedule(&pr->napi);
	pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
	       eqe->entry, qp_token);

		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
		pr_err("no mem for cb0\n");

	if (hret != H_SUCCESS) {

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {

		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
		pr_err("no mem for cb4\n");

	if (hret == H_SUCCESS) {

	if (hret == H_SUCCESS) {

		pr_err("Failed sensing port speed\n");

		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");

			pr_err("Failed setting port speed\n");
	port = ehea_get_port(adapter, portnum);

		netdev_err(dev, "unknown portnum %x\n", portnum);

		if (!netif_carrier_ok(dev)) {

				netdev_err(dev, "failed resensing port attributes\n");

			   "Logical port up: %dMbps %s Duplex\n",

			netif_wake_queue(dev);

		if (netif_carrier_ok(dev)) {
				   "Logical port down\n");

			netif_tx_disable(dev);

				   "Physical port up\n");
			if (prop_carrier_state)

				   "Physical port down\n");
			if (prop_carrier_state)

				   "External switch port is primary port\n");

				   "External switch port is backup port\n");

		netdev_err(dev, "Adapter malfunction\n");

		netdev_info(dev, "Port malfunction\n");

		netif_tx_disable(dev);

		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
static void ehea_neq_tasklet(unsigned long data)

		ehea_parse_eqe(adapter, eqe->entry);

			      adapter->neq->fw_handle, event_mask);

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
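/*
 * Each port uses one interrupt for QP affiliated errors (qp_eq) plus
 * one receive interrupt per port resource; the ist1 tokens logged in
 * the registration paths below identify the interrupt sources.
 */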
	ehea_init_fill_rq1(pr, pr->rq1_skba.len);
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

			  ehea_qp_aff_irq_handler,

		netdev_err(dev,
			   "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);

		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

			 "%s-queue%d", dev->name, i);

				  ehea_recv_irq_handler,

			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);

			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);

		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
static int ehea_configure_port(struct ehea_port *port)

	for (i = 0; i < port->num_mcs; i++)

			port->port_res[0].qp->init_attr.qp_nr;

		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	if (hret != H_SUCCESS)
		pr_err("Generating SMRS failed\n");
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->len = max_q_entries;
		pr_err("create_eq failed (eq)\n");

		pr_err("create_cq failed (cq_recv)\n");

		pr_err("create_cq failed (cq_send)\n");

	pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
		pr->send_cq->attr.act_nr_of_cqes,
		pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		pr_err("no mem for ehea_qp_init_attr\n");

		pr_err("create_qp failed\n");

	pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",

	if (ehea_gen_smrs(pr) != 0) {
	for (i = 0; i < pr->rq1_skba.len; i++)
		if (pr->rq1_skba.arr[i])
			dev_kfree_skb(pr->rq1_skba.arr[i]);

	for (i = 0; i < pr->rq2_skba.len; i++)
		if (pr->rq2_skba.arr[i])
			dev_kfree_skb(pr->rq2_skba.arr[i]);

	for (i = 0; i < pr->rq3_skba.len; i++)
		if (pr->rq3_skba.arr[i])
			dev_kfree_skb(pr->rq3_skba.arr[i]);

	for (i = 0; i < pr->sq_skba.len; i++)
		if (pr->sq_skba.arr[i])
			dev_kfree_skb(pr->sq_skba.arr[i]);

	ret = ehea_rem_smrs(pr);
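/*
 * TX path, SWQE type 2: the start of the frame travels as immediate
 * data inside the WQE. For GSO frames only the headers (ETH_HLEN plus
 * the IP and TCP header lengths) go there; the remaining linear data
 * and the page fragments are described by scatter-gather entries.
 */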
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)

	int skb_data_size = skb_headlen(skb);

	if (skb_is_gso(skb)) {

		swqe->mss = skb_shinfo(skb)->gso_size;

		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_copy_from_linear_data(skb, imm_data, immediate_len);

	if (skb_data_size > immediate_len) {

		sg1entry->len = skb_data_size - immediate_len;

		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)

	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;

	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

		frag = &skb_shinfo(skb)->frags[0];

			sg1entry->len = skb_frag_size(frag);

			sg1entry_contains_frag_data = 1;

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->len = skb_frag_size(frag);
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)

			     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (tagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");

			     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);

	if (!is_valid_ether_addr(mac_addr->sa_data)) {

		pr_err("no mem for cb0\n");

	if (hret != H_SUCCESS) {

	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);

	ehea_update_bcmc_registrations();
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);

		pr_err("no mem for cb7\n");

		ehea_promiscuous_error(hret, enable);
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)

	if (mc_mac_addr == 0)

			     reg_type, mc_mac_addr, 0, hcallid);

	if (mc_mac_addr == 0)

			     reg_type, mc_mac_addr, 0, hcallid);
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,

			pr_err("failed deregistering mcast MAC\n");
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);

			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);

				   "failed enabling IFF_ALLMULTI\n");

			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);

				   "failed disabling IFF_ALLMULTI\n");
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,

		list_add(&ehea_mcl_entry->list, &port->mc_list->list);

		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

		ehea_allmulti(dev, 1);

	ehea_allmulti(dev, 0);

		ret = ehea_drop_multicast_list(dev);

			ehea_allmulti(dev, 1);

			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",

			ehea_add_multicast_entry(port, ha->addr);

	ehea_update_bcmc_registrations();
	swqe->ip_start = skb_network_offset(skb);

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);

	xmit_common(skb, swqe);

	skb_copy_from_linear_data(skb, imm_data, skb->len);
	struct ehea_port *port = netdev_priv(dev);

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);

		ehea_xmit3(skb, dev, swqe);

		ehea_xmit2(skb, dev, swqe, lkey);

			   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);

		netif_tx_stop_queue(txq);

	ehea_post_swqe(pr->qp, swqe);

		netif_tx_stop_queue(txq);
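/*
 * Both VLAN operations are a query/modify round trip against the
 * hypervisor port attributes: fetch the VLAN filter state with
 * query_ehea_port, update the bit for this vid, and write it back
 * with modify_ehea_port.
 */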
static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);

		pr_err("no mem for cb1\n");

	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");

	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");

static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);

		pr_err("no mem for cb1\n");

	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");

	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");

			    &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");

	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");

			    &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");

	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");

			    &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");

	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
static int ehea_port_res_setup(struct ehea_port *port, int def_qps)

		pr_err("ehea_create_eq failed (qp_eq)\n");

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
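	/*
	 * Note: with identical start and end bounds the loop below never
	 * executes; it appears to be a leftover from the removed separate
	 * TX-QP configuration.
	 */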
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);

		ehea_clean_portres(port, &port->port_res[i]);
static int ehea_clean_all_portres(struct ehea_port *port)

		ret |= ehea_clean_portres(port, &port->port_res[i]);
static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
	struct ehea_port *port = netdev_priv(dev);

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");

	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);

			netdev_err(dev, "activate_qp failed\n");

		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "out_free_irqs\n");

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);

	ehea_free_interrupts(dev);

	ehea_clean_all_portres(port);

	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();
static void port_napi_disable(struct ehea_port *port)

		napi_disable(&port->port_res[i].napi);

static void port_napi_enable(struct ehea_port *port)

		napi_enable(&port->port_res[i].napi);
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifup, dev, "enabling port\n");

		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	struct ehea_port *port = netdev_priv(dev);

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
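/*
 * Send-queue teardown used around QP restarts: ehea_purge_sq() discards
 * the SWQEs still queued in software, ehea_flush_sq() then waits for
 * the hardware to drain; an incomplete flush is only warned about.
 */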
static void ehea_purge_sq(struct ehea_qp *orig_qp)

		swqe = ehea_get_swqe(&qp, &wqe_index);

static void ehea_flush_sq(struct ehea_port *port)

			pr_err("WARNING: sq not flushed completely\n");
static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");

					    &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");

		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");

		dret = ehea_rem_smrs(pr);
		if (dret)
			pr_err("unreg shared memory region failed\n");
		rwqe = ehea_get_next_rwqe(&qp, 2);

		skb = skba_rq2[index];

		rwqe = ehea_get_next_rwqe(&qp, 3);

		skb = skba_rq3[index];
static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");

		ehea_update_rqs(qp, pr);

		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");

					    &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");

		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");

		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
static void ehea_reset_port(struct work_struct *work)

	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_set_multicast_list(dev);

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
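/*
 * Memory hotplug: when LPAR memory changes, quiesce every port (disable
 * TX, flush and stop the QPs, turn NAPI off), deregister and re-register
 * all memory regions, then restart the QPs. If re-registration fails the
 * driver is left inoperable, as the messages below state.
 */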
static void ehea_rereg_mrs(void)

	pr_info("LPAR memory changed - re-initializing driver\n");

				netif_tx_disable(dev);
				ehea_flush_sq(port);
				ret = ehea_stop_qps(dev);

				port_napi_disable(port);

			reset_sq_restart_flag(port);

			pr_err("unregister MR failed - driver inoperable!\n");

		if (adapter->active_ports) {

				pr_err("register MR failed - driver inoperable!\n");

					ret = ehea_restart_qps(dev);
					if (!ret) {
						port_napi_enable(port);
						netif_tx_wake_all_queues(dev);
					} else {
						netdev_err(dev, "Unable to restart QPS\n");

	pr_info("re-initializing driver complete\n");
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
		ehea_schedule_port_reset(port);
static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)

	if (hret != H_SUCCESS) {
static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)

		pr_err("no mem for cb4\n");

	if (hret == H_SUCCESS) {

			if (hret == H_SUCCESS)
	of_node_put(port->ofdev.dev.of_node);

	port->ofdev.dev.of_node = of_node_get(dn);

	port->ofdev.dev.release = logical_port_release;

		pr_err("failed to register device. ret=%d\n", ret);

		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;

	return &port->ofdev.dev;
static void ehea_unregister_port(struct ehea_port *port)

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
	port = netdev_priv(dev);

		goto out_free_ethdev;

	INIT_LIST_HEAD(&port->mc_list->list);

		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	ehea_unregister_port(port);

	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
static void ehea_shutdown_single_port(struct ehea_port *port)

	ehea_unregister_port(port);
static int ehea_setup_ports(struct ehea_adapter *adapter)

	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;

		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);

		adapter->port[i] = ehea_setup_single_port(adapter,

		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)

	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;

			if (*dn_log_port_id == logical_port_id)
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev,
			    "adding port with logical port id=%d failed: port already configured\n",

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

		if (!adapter->port[i]) {

		netdev_info(port->netdev, "added: (logical port id=%d)\n",

		ehea_remove_adapter_mr(adapter);
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",

		ehea_shutdown_single_port(port);

			if (adapter->port[i] == port) {

		pr_err("removing port with logical port id=%d failed. port not configured.\n",

	ehea_remove_adapter_mr(adapter);
	const u64 *adapter_handle;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		dev_err(&dev->dev, "no mem for ehea_adapter\n");

	list_add(&adapter->list, &adapter_list);

		adapter->handle = *adapter_handle;

		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);

	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);

	if (!adapter->neq) {

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ehea_create_device_sysfs(dev);

	ret = ehea_setup_ports(adapter);
	if (ret)
		goto out_rem_dev_sysfs;

			  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;

		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);

	ehea_remove_device_sysfs(dev);
	ehea_update_firmware_handles();

		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);

	ehea_remove_device_sysfs(dev);

	ehea_remove_adapter_mr(adapter);

	ehea_update_firmware_handles();
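/*
 * Crash/kexec path: free every recorded firmware handle and undo every
 * broadcast/multicast registration directly from the snapshot arrays,
 * since no regular teardown will run.
 */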
static void ehea_crash_handler(void)
{
	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;

		pr_info("memory offlining canceled");

		pr_info("memory is going online");

		pr_info("memory is going offline");

	ehea_update_firmware_handles();

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	pr_info("Reboot: freeing all eHEA resources\n");

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
{
		pr_info("Bad parameter: rq1_entries\n");

		pr_info("Bad parameter: rq2_entries\n");

		pr_info("Bad parameter: rq3_entries\n");

		pr_info("Bad parameter: sq_entries\n");
static DRIVER_ATTR(capabilities, S_IRUSR | S_IRUGO,
		   ehea_show_capabilities, NULL);
static int __init ehea_module_init(void)
{
	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	ret = check_module_parm();

		pr_info("failed registering reboot notifier\n");

		pr_info("failed registering memory remove notifier\n");

		pr_info("failed registering crash handler\n");

		pr_err("failed registering eHEA device driver on ebus\n");

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
static void __exit ehea_module_exit(void)
{
		pr_info("failed unregistering crash handler\n");

	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);