27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
40 #include <linux/ethtool.h>
43 #include <linux/ipv6.h>
44 #include <linux/slab.h>
48 #include <asm/iommu.h>
53 static irqreturn_t ibmveth_interrupt(
int irq,
void *dev_instance);
55 static unsigned long ibmveth_get_desired_dma(
struct vio_dev *vdev);
/* Driver identity strings, reported to userspace via ethtool get_drvinfo. */
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"
72 "Maximum size of packet that is copied to a new buffer on transmit");
77 "Maximum size of packet that is copied to a new buffer on receive");
/* Byte offset of a named statistics counter within struct ibmveth_adapter. */
#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
/*
 * Fetch a u64 statistic from adapter 'a' at byte offset 'off'.
 * 'off' is parenthesized so that expression arguments (e.g. "base + i")
 * expand safely inside the pointer arithmetic.
 */
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + (off)))
94 {
"replenish_add_buff_failure",
96 {
"replenish_add_buff_success",
118 static inline int ibmveth_rxq_pending_buffer(
struct ibmveth_adapter *adapter)
120 return ibmveth_rxq_toggle(adapter) == adapter->
rx_queue.toggle;
123 static inline int ibmveth_rxq_buffer_valid(
struct ibmveth_adapter *adapter)
128 static inline int ibmveth_rxq_frame_offset(
struct ibmveth_adapter *adapter)
133 static inline int ibmveth_rxq_frame_length(
struct ibmveth_adapter *adapter)
138 static inline int ibmveth_rxq_csum_good(
struct ibmveth_adapter *adapter)
148 pool->
size = pool_size;
152 pool->
active = pool_active;
185 for (i = 0; i < pool->
size; ++
i)
195 static inline void ibmveth_flush_buffer(
void *
addr,
unsigned long length)
200 asm(
"dcbfl %0,%1" ::
"b" (addr),
"r" (offset));
206 static void ibmveth_replenish_buffer_pool(
struct ibmveth_adapter *adapter,
211 u32 buffers_added = 0;
213 unsigned int free_index,
index;
215 unsigned long lpar_rc;
220 for (i = 0; i <
count; ++
i) {
227 "replenish: unable to allocate skb\n");
252 *(
u64 *)skb->data = correlator;
255 desc.fields.address = dma_addr;
261 ibmveth_flush_buffer(skb->data, len);
266 if (lpar_rc != H_SUCCESS) {
308 ibmveth_replenish_buffer_pool(adapter, pool);
325 for (i = 0; i < pool->
size; ++
i) {
350 static void ibmveth_remove_buffer_from_pool(
struct ibmveth_adapter *adapter,
353 unsigned int pool = correlator >> 32;
354 unsigned int index = correlator & 0xffffffff
UL;
355 unsigned int free_index;
372 free_index = adapter->
rx_buff_pool[pool].producer_index;
388 unsigned int pool = correlator >> 32;
389 unsigned int index = correlator & 0xffffffff
UL;
401 u64 correlator = adapter->
rx_queue.queue_addr[q_index].correlator;
402 unsigned int pool = correlator >> 32;
403 unsigned int index = correlator & 0xffffffff
UL;
405 unsigned long lpar_rc;
412 ibmveth_rxq_harvest_buffer(adapter);
413 ibmveth_free_buffer_pool(adapter, &adapter->
rx_buff_pool[pool]);
423 if (lpar_rc != H_SUCCESS) {
425 "during recycle rc=%ld", lpar_rc);
426 ibmveth_remove_buffer_from_pool(adapter, adapter->
rx_queue.queue_addr[adapter->
rx_queue.index].correlator);
439 static void ibmveth_rxq_harvest_buffer(
struct ibmveth_adapter *adapter)
441 ibmveth_remove_buffer_from_pool(adapter, adapter->
rx_queue.queue_addr[adapter->
rx_queue.index].correlator);
483 ibmveth_free_buffer_pool(adapter,
499 static int ibmveth_register_logical_lan(
struct ibmveth_adapter *adapter,
502 int rc, try_again = 1;
514 if (rc != H_SUCCESS && try_again) {
517 }
while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
526 static int ibmveth_open(
struct net_device *netdev)
531 unsigned long lpar_rc;
539 napi_enable(&adapter->
napi);
548 netdev_err(netdev,
"unable to allocate filter or buffer list "
554 dev = &adapter->
vdev->dev;
562 if (!adapter->
rx_queue.queue_addr) {
563 netdev_err(netdev,
"unable to allocate rx queue pages\n");
575 netdev_err(netdev,
"unable to map filter or buffer list "
582 adapter->
rx_queue.num_slots = rxq_entries;
586 mac_address = mac_address >> 16;
596 h_vio_signal(adapter->
vdev->unit_address, VIO_IRQ_DISABLE);
598 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
600 if (lpar_rc != H_SUCCESS) {
601 netdev_err(netdev,
"h_register_logical_lan failed with %ld\n",
603 netdev_err(netdev,
"buffer TCE:0x%llx filter TCE:0x%llx rxq "
604 "desc:0x%llx MAC:0x%llx\n",
616 if (ibmveth_alloc_buffer_pool(&adapter->
rx_buff_pool[i])) {
617 netdev_err(netdev,
"unable to alloc pool\n");
628 netdev_err(netdev,
"unable to request irq 0x%x, rc %d\n",
632 }
while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
640 netdev_err(netdev,
"unable to allocate bounce buffer\n");
642 goto err_out_free_irq;
648 netdev_err(netdev,
"unable to map bounce buffer\n");
650 goto err_out_free_irq;
653 netdev_dbg(netdev,
"initial replenish cycle\n");
654 ibmveth_interrupt(netdev->
irq, netdev);
656 netif_start_queue(netdev);
665 ibmveth_cleanup(adapter);
666 napi_disable(&adapter->
napi);
670 static int ibmveth_close(
struct net_device *netdev)
677 napi_disable(&adapter->
napi);
680 netif_stop_queue(netdev);
682 h_vio_signal(adapter->
vdev->unit_address, VIO_IRQ_DISABLE);
686 }
while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
688 if (lpar_rc != H_SUCCESS) {
689 netdev_err(netdev,
"h_free_logical_lan failed with %lx, "
690 "continuing with close\n", lpar_rc);
698 ibmveth_cleanup(adapter);
722 static void netdev_get_drvinfo(
struct net_device *dev,
751 unsigned long set_attr, clr_attr, ret_attr;
752 unsigned long set_attr6, clr_attr6;
753 long ret, ret4, ret6;
754 int rc1 = 0, rc2 = 0;
757 if (netif_running(dev)) {
777 ret = h_illan_attributes(adapter->
vdev->unit_address, 0, 0, &ret_attr);
782 ret4 = h_illan_attributes(adapter->
vdev->unit_address, clr_attr,
783 set_attr, &ret_attr);
785 if (ret4 != H_SUCCESS) {
786 netdev_err(dev,
"unable to change IPv4 checksum "
787 "offload settings. %d rc=%ld\n",
790 h_illan_attributes(adapter->
vdev->unit_address,
791 set_attr, clr_attr, &ret_attr);
800 ret6 = h_illan_attributes(adapter->
vdev->unit_address,
801 clr_attr6, set_attr6, &ret_attr);
803 if (ret6 != H_SUCCESS) {
804 netdev_err(dev,
"unable to change IPv6 checksum "
805 "offload settings. %d rc=%ld\n",
808 h_illan_attributes(adapter->
vdev->unit_address,
809 set_attr6, clr_attr6, &ret_attr);
817 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
823 netdev_err(dev,
"unable to change checksum offload settings."
824 " %d rc=%ld ret_attr=%lx\n", data, ret,
829 rc2 = ibmveth_open(dev);
831 return rc1 ? rc1 : rc2;
834 static int ibmveth_set_features(
struct net_device *dev,
841 if (rx_csum == adapter->
rx_csum)
844 rc = ibmveth_set_csum_offload(dev, rx_csum);
851 static void ibmveth_get_strings(
struct net_device *dev,
u32 stringset,
u8 *data)
862 static int ibmveth_get_sset_count(
struct net_device *dev,
int sset)
872 static void ibmveth_get_ethtool_stats(
struct net_device *dev,
878 for (i = 0; i <
ARRAY_SIZE(ibmveth_stats); i++)
882 static const struct ethtool_ops netdev_ethtool_ops = {
883 .get_drvinfo = netdev_get_drvinfo,
884 .get_settings = netdev_get_settings,
886 .get_strings = ibmveth_get_strings,
887 .get_sset_count = ibmveth_get_sset_count,
888 .get_ethtool_stats = ibmveth_get_ethtool_stats,
896 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
901 unsigned long correlator;
912 ret = h_send_logical_lan(adapter->
vdev->unit_address,
916 correlator, &correlator);
917 }
while ((ret == H_BUSY) && (retry_count--));
919 if (ret != H_SUCCESS && ret != H_DROPPED) {
920 netdev_err(adapter->
netdev,
"tx: h_send_logical_lan failed "
921 "with rc=%ld\n", ret);
932 unsigned int desc_flags;
935 int force_bounce = 0;
942 if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
943 netdev->
stats.tx_dropped++;
955 netdev_err(netdev,
"tx: failed to checksum packet\n");
956 netdev->
stats.tx_dropped++;
963 unsigned char *
buf = skb_transport_header(skb) +
974 memset(descs, 0,
sizeof(descs));
981 if (force_bounce || (!skb_is_nonlinear(skb) &&
982 (skb->
len < tx_copybreak))) {
986 descs[0].fields.flags_len = desc_flags | skb->
len;
989 if (ibmveth_send(adapter, descs)) {
991 netdev->
stats.tx_dropped++;
993 netdev->
stats.tx_packets++;
1006 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1007 descs[0].fields.address =
dma_addr;
1010 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1013 dma_addr = skb_frag_dma_map(&adapter->
vdev->dev, frag, 0,
1017 goto map_failed_frags;
1019 descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
1020 descs[i+1].fields.address =
dma_addr;
1023 if (ibmveth_send(adapter, descs)) {
1025 netdev->
stats.tx_dropped++;
1027 netdev->
stats.tx_packets++;
1028 netdev->
stats.tx_bytes += skb->
len;
1032 descs[0].fields.address,
1036 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1047 for (i = 0; i < last; i++)
1049 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1053 if (!firmware_has_feature(FW_FEATURE_CMO))
1054 netdev_err(netdev,
"tx: unable to map xmit buffer\n");
1066 int frames_processed = 0;
1067 unsigned long lpar_rc;
1071 if (!ibmveth_rxq_pending_buffer(adapter))
1075 if (!ibmveth_rxq_buffer_valid(adapter)) {
1078 netdev_dbg(netdev,
"recycling invalid buffer\n");
1079 ibmveth_rxq_recycle_buffer(adapter);
1082 int length = ibmveth_rxq_frame_length(adapter);
1083 int offset = ibmveth_rxq_frame_offset(adapter);
1084 int csum_good = ibmveth_rxq_csum_good(adapter);
1086 skb = ibmveth_rxq_get_buffer(adapter);
1089 if (length < rx_copybreak)
1090 new_skb = netdev_alloc_skb(netdev, length);
1093 skb_copy_to_linear_data(new_skb,
1097 ibmveth_flush_buffer(skb->
data,
1099 if (!ibmveth_rxq_recycle_buffer(adapter))
1103 ibmveth_rxq_harvest_buffer(adapter);
1104 skb_reserve(skb, offset);
1115 netdev->
stats.rx_packets++;
1119 }
while (frames_processed < budget);
1121 ibmveth_replenish_task(adapter);
1123 if (frames_processed < budget) {
1127 lpar_rc = h_vio_signal(adapter->
vdev->unit_address,
1130 BUG_ON(lpar_rc != H_SUCCESS);
1134 if (ibmveth_rxq_pending_buffer(adapter) &&
1135 napi_reschedule(napi)) {
1136 lpar_rc = h_vio_signal(adapter->
vdev->unit_address,
1142 return frames_processed;
1145 static irqreturn_t ibmveth_interrupt(
int irq,
void *dev_instance)
1149 unsigned long lpar_rc;
1151 if (napi_schedule_prep(&adapter->
napi)) {
1152 lpar_rc = h_vio_signal(adapter->
vdev->unit_address,
1154 BUG_ON(lpar_rc != H_SUCCESS);
1160 static void ibmveth_set_multicast_list(
struct net_device *netdev)
1163 unsigned long lpar_rc;
1171 if (lpar_rc != H_SUCCESS) {
1172 netdev_err(netdev,
"h_multicast_ctrl rc=%ld when "
1173 "entering promisc mode\n", lpar_rc);
1183 if (lpar_rc != H_SUCCESS) {
1184 netdev_err(netdev,
"h_multicast_ctrl rc=%ld when "
1185 "attempting to clear filter table\n",
1191 unsigned long mcast_addr = 0;
1192 memcpy(((
char *)&mcast_addr)+2, ha->
addr, 6);
1196 if (lpar_rc != H_SUCCESS) {
1197 netdev_err(netdev,
"h_multicast_ctrl rc=%ld "
1198 "when adding an entry to the filter "
1199 "table\n", lpar_rc);
1207 if (lpar_rc != H_SUCCESS) {
1208 netdev_err(netdev,
"h_multicast_ctrl rc=%ld when "
1209 "enabling filtering\n", lpar_rc);
1214 static int ibmveth_change_mtu(
struct net_device *dev,
int new_mtu)
1220 int need_restart = 0;
1226 if (new_mtu_oh < adapter->rx_buff_pool[i].
buff_size)
1229 if (i == IBMVETH_NUM_BUFF_POOLS)
1234 if (netif_running(adapter->
netdev)) {
1237 ibmveth_close(adapter->
netdev);
1245 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
1248 ibmveth_get_desired_dma
1251 return ibmveth_open(adapter->
netdev);
1257 if (need_restart && (rc = ibmveth_open(adapter->
netdev)))
1263 #ifdef CONFIG_NET_POLL_CONTROLLER
1264 static void ibmveth_poll_controller(
struct net_device *dev)
1266 ibmveth_replenish_task(netdev_priv(dev));
1267 ibmveth_interrupt(dev->
irq, dev);
1279 static unsigned long ibmveth_get_desired_dma(
struct vio_dev *vdev)
1291 adapter = netdev_priv(netdev);
1294 ret += IOMMU_PAGE_ALIGN(netdev->
mtu);
1312 .ndo_open = ibmveth_open,
1313 .ndo_stop = ibmveth_close,
1314 .ndo_start_xmit = ibmveth_start_xmit,
1315 .ndo_set_rx_mode = ibmveth_set_multicast_list,
1316 .ndo_do_ioctl = ibmveth_ioctl,
1317 .ndo_change_mtu = ibmveth_change_mtu,
1318 .ndo_fix_features = ibmveth_fix_features,
1319 .ndo_set_features = ibmveth_set_features,
1322 #ifdef CONFIG_NET_POLL_CONTROLLER
1323 .ndo_poll_controller = ibmveth_poll_controller,
1333 unsigned char *mac_addr_p;
1334 unsigned int *mcastFilterSize_p;
1336 dev_dbg(&dev->
dev,
"entering ibmveth_probe for UA 0x%x\n",
1342 dev_err(&dev->
dev,
"Can't find VETH_MAC_ADDR attribute\n");
1347 VETH_MCAST_FILTER_SIZE,
NULL);
1348 if (!mcastFilterSize_p) {
1349 dev_err(&dev->
dev,
"Can't find VETH_MCAST_FILTER_SIZE "
1359 adapter = netdev_priv(netdev);
1377 if ((*mac_addr_p & 0x3) != 0x02)
1383 netdev->
irq = dev->irq;
1398 pool_count[i], pool_size[i],
1401 &dev->
dev.kobj,
"pool%d", i);
1406 netdev_dbg(netdev,
"adapter @ 0x%p\n", adapter);
1412 netdev_dbg(netdev,
"registering netdev...\n");
1414 ibmveth_set_features(netdev, netdev->
features);
1419 netdev_dbg(netdev,
"failed to register netdev rc=%d\n", rc);
1446 static struct attribute veth_active_attr;
1457 if (attr == &veth_active_attr)
1459 else if (attr == &veth_num_attr)
1461 else if (attr == &veth_size_attr)
1467 const char *buf,
size_t count)
1478 if (attr == &veth_active_attr) {
1479 if (value && !pool->
active) {
1480 if (netif_running(netdev)) {
1481 if (ibmveth_alloc_buffer_pool(pool)) {
1483 "unable to alloc pool\n");
1488 ibmveth_close(netdev);
1490 if ((rc = ibmveth_open(netdev)))
1495 }
else if (!value && pool->
active) {
1509 if (i == IBMVETH_NUM_BUFF_POOLS) {
1510 netdev_err(netdev,
"no active pool >= MTU\n");
1514 if (netif_running(netdev)) {
1516 ibmveth_close(netdev);
1519 if ((rc = ibmveth_open(netdev)))
1524 }
else if (attr == &veth_num_attr) {
1528 if (netif_running(netdev)) {
1530 ibmveth_close(netdev);
1533 if ((rc = ibmveth_open(netdev)))
1539 }
else if (attr == &veth_size_attr) {
1543 if (netif_running(netdev)) {
1545 ibmveth_close(netdev);
1548 if ((rc = ibmveth_open(netdev)))
1557 ibmveth_interrupt(netdev->
irq, netdev);
1562 #define ATTR(_name, _mode) \
1563 struct attribute veth_##_name##_attr = { \
1564 .name = __stringify(_name), .mode = _mode, \
1568 static ATTR(num, 0644);
1571 static struct attribute *veth_pool_attrs[] = {
1578 static const struct sysfs_ops veth_pool_ops = {
1579 .show = veth_pool_show,
1580 .store = veth_pool_store,
1583 static struct kobj_type ktype_veth_pool = {
1585 .sysfs_ops = &veth_pool_ops,
1586 .default_attrs = veth_pool_attrs,
1589 static int ibmveth_resume(
struct device *dev)
1592 ibmveth_interrupt(netdev->
irq, netdev);
1597 {
"network",
"IBM,l-lan"},
1603 .resume = ibmveth_resume
1607 .id_table = ibmveth_device_table,
1608 .probe = ibmveth_probe,
1609 .remove = ibmveth_remove,
1610 .get_desired_dma = ibmveth_get_desired_dma,
1611 .name = ibmveth_driver_name,
1612 .pm = &ibmveth_pm_ops,
1615 static int __init ibmveth_module_init(
void)
1623 static void __exit ibmveth_module_exit(
void)