11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/netdevice.h>
18 #include <linux/tcp.h>
21 #include <linux/ethtool.h>
87 #define EFX_MAX_MTU (9 * 1024)
109 static unsigned int separate_tx_channels;
112 "Use separate channels for TX and RX");
117 static int napi_weight = 64;
124 static unsigned int efx_monitor_interval = 1 *
HZ;
132 static unsigned int rx_irq_mod_usec = 60;
143 static unsigned int tx_irq_mod_usec = 150;
150 static unsigned int interrupt_mode;
159 static unsigned int rss_cpus;
161 MODULE_PARM_DESC(rss_cpus,
"Number of CPUs to use for Receive-Side Scaling");
163 static int phy_flash_cfg;
167 static unsigned irq_adapt_low_thresh = 8000;
170 "Threshold score for reducing IRQ moderation");
172 static unsigned irq_adapt_high_thresh = 16000;
175 "Threshold score for increasing IRQ moderation");
190 static void efx_start_interrupts(
struct efx_nic *efx,
bool may_keep_eventq);
191 static void efx_stop_interrupts(
struct efx_nic *efx,
bool may_keep_eventq);
193 static void efx_remove_channels(
struct efx_nic *efx);
195 static void efx_remove_port(
struct efx_nic *efx);
197 static void efx_fini_napi(
struct efx_nic *efx);
199 static void efx_fini_struct(
struct efx_nic *efx);
200 static void efx_start_all(
struct efx_nic *efx);
201 static void efx_stop_all(
struct efx_nic *efx);
203 #define EFX_ASSERT_RESET_SERIALISED(efx) \
205 if ((efx->state == STATE_READY) || \
206 (efx->state == STATE_DISABLED)) \
210 static int efx_check_disabled(
struct efx_nic *efx)
214 "device is disabled due to earlier errors\n");
241 if (spent && efx_channel_has_rx_queue(channel)) {
243 efx_channel_get_rx_queue(channel);
265 static inline void efx_channel_processed(
struct efx_channel *channel)
289 "channel %d NAPI poll executing on CPU %d\n",
292 spent = efx_process_channel(channel, budget);
294 if (spent < budget) {
295 if (efx_channel_has_rx_queue(channel) &&
299 irq_adapt_low_thresh)) {
302 efx->
type->push_irq_moderation(channel);
305 irq_adapt_high_thresh)) {
309 efx->
type->push_irq_moderation(channel);
316 efx_filter_rfs_expire(channel);
324 efx_channel_processed(channel);
360 efx_process_channel(channel, channel->
eventq_mask + 1);
364 efx_channel_processed(channel);
377 static int efx_probe_eventq(
struct efx_channel *channel)
383 "chan %d create event queue\n", channel->
channel);
395 static void efx_init_eventq(
struct efx_channel *channel)
398 "chan %d init event queue\n", channel->
channel);
406 static void efx_start_eventq(
struct efx_channel *channel)
409 "chan %d start event queue\n", channel->
channel);
424 static void efx_stop_eventq(
struct efx_channel *channel)
433 static void efx_fini_eventq(
struct efx_channel *channel)
436 "chan %d fini event queue\n", channel->
channel);
441 static void efx_remove_eventq(
struct efx_channel *channel)
444 "chan %d remove event queue\n", channel->
channel);
464 channel = kzalloc(
sizeof(*channel),
GFP_KERNEL);
470 channel->
type = &efx_default_channel_type;
475 tx_queue->
queue = i * EFX_TXQ_TYPES +
j;
482 (
unsigned long)rx_queue);
491 efx_copy_channel(
const struct efx_channel *old_channel)
502 *channel = *old_channel;
519 (
unsigned long)rx_queue);
524 static int efx_probe_channel(
struct efx_channel *channel)
531 "creating channel %d\n", channel->
channel);
533 rc = channel->
type->pre_probe(channel);
537 rc = efx_probe_eventq(channel);
558 efx_remove_channel(channel);
563 efx_get_channel_name(
struct efx_channel *channel,
char *
buf,
size_t len)
581 static void efx_set_channel_names(
struct efx_nic *efx)
586 channel->type->get_name(channel,
587 efx->channel_name[channel->channel],
588 sizeof(efx->channel_name[0]));
597 efx->next_buffer_table = 0;
605 rc = efx_probe_channel(channel);
608 "failed to create channel %d\n",
613 efx_set_channel_names(efx);
618 efx_remove_channels(efx);
626 static void efx_start_datapath(
struct efx_nic *efx)
638 efx->
type->rx_buffer_hash_size +
639 efx->
type->rx_buffer_padding);
670 if (netif_device_present(efx->
net_dev))
671 netif_tx_wake_all_queues(efx->
net_dev);
674 static void efx_stop_datapath(
struct efx_nic *efx)
695 "Resetting to recover from flush failure\n");
701 "successfully flushed all queues\n");
712 if (efx_channel_has_rx_queue(channel)) {
713 efx_stop_eventq(channel);
714 efx_start_eventq(channel);
729 netif_dbg(channel->efx, drv, channel->efx->net_dev,
730 "destroy chan %d\n", channel->channel);
736 efx_remove_eventq(channel);
737 channel->type->post_remove(channel);
745 efx_remove_channel(channel);
752 u32 old_rxq_entries, old_txq_entries;
753 unsigned i, next_buffer_table = 0;
756 rc = efx_check_disabled(efx);
767 if (channel->
type->copy)
769 next_buffer_table =
max(next_buffer_table,
773 next_buffer_table =
max(next_buffer_table,
774 rx_queue->
rxd.index +
775 rx_queue->
rxd.entries);
777 next_buffer_table =
max(next_buffer_table,
778 tx_queue->
txd.index +
779 tx_queue->
txd.entries);
783 efx_stop_interrupts(efx,
true);
786 memset(other_channel, 0,
sizeof(other_channel));
787 for (i = 0; i < efx->n_channels; i++) {
789 if (channel->
type->copy)
790 channel = channel->
type->copy(channel);
799 old_rxq_entries = efx->rxq_entries;
800 old_txq_entries = efx->txq_entries;
801 efx->rxq_entries = rxq_entries;
802 efx->txq_entries = txq_entries;
803 for (i = 0; i < efx->n_channels; i++) {
805 efx->channel[
i] = other_channel[
i];
810 efx->next_buffer_table = next_buffer_table;
812 for (i = 0; i < efx->n_channels; i++) {
814 if (!channel->
type->copy)
816 rc = efx_probe_channel(channel);
819 efx_init_napi_channel(efx->channel[i]);
824 for (i = 0; i < efx->n_channels; i++) {
825 channel = other_channel[
i];
826 if (channel && channel->
type->copy) {
827 efx_fini_napi_channel(channel);
828 efx_remove_channel(channel);
833 efx_start_interrupts(efx,
true);
839 efx->rxq_entries = old_rxq_entries;
840 efx->txq_entries = old_txq_entries;
841 for (i = 0; i < efx->n_channels; i++) {
843 efx->channel[
i] = other_channel[
i];
857 .get_name = efx_get_channel_name,
858 .copy = efx_copy_channel,
859 .keep_eventq =
false,
889 if (!netif_running(efx->
net_dev))
892 if (link_state->
up != netif_carrier_ok(efx->
net_dev)) {
904 "link up at %uMbps %s-duplex (MTU %d)%s\n",
905 link_state->
speed, link_state->
fd ?
"full" :
"half",
940 static void efx_fini_port(
struct efx_nic *efx);
957 netif_addr_lock_bh(efx->
net_dev);
958 netif_addr_unlock_bh(efx->
net_dev);
967 rc = efx->
type->reconfigure_port(efx);
999 efx->
type->reconfigure_mac(efx);
1003 static int efx_probe_port(
struct efx_nic *efx)
1013 rc = efx->
type->probe_port(efx);
1023 static int efx_init_port(
struct efx_nic *efx)
1031 rc = efx->
phy_op->init(efx);
1039 efx->
type->reconfigure_mac(efx);
1042 rc = efx->
phy_op->reconfigure(efx);
1056 static void efx_start_port(
struct efx_nic *efx)
1066 efx->
type->reconfigure_mac(efx);
1072 static void efx_stop_port(
struct efx_nic *efx)
1081 netif_addr_lock_bh(efx->
net_dev);
1082 netif_addr_unlock_bh(efx->
net_dev);
1085 static void efx_fini_port(
struct efx_nic *efx)
1099 static void efx_remove_port(
struct efx_nic *efx)
1103 efx->
type->remove_port(efx);
1113 static int efx_init_io(
struct efx_nic *efx)
1124 "failed to enable PCI device\n");
1135 while (dma_mask > 0x7fffffffUL) {
1145 "could not find a suitable DMA mask\n");
1149 "using DMA mask %llx\n", (
unsigned long long) dma_mask);
1157 "failed to set consistent DMA mask\n");
1165 "request for memory BAR failed\n");
1170 efx->
type->mem_map_size);
1173 "could not map memory BAR at %llx+%x\n",
1175 efx->
type->mem_map_size);
1180 "memory BAR at %llx+%x (virtual %p)\n",
1196 static void efx_fini_io(
struct efx_nic *efx)
1213 static unsigned int efx_wanted_parallelism(
struct efx_nic *efx)
1224 "RSS disabled due to allocation failure\n");
1232 cpumask_or(thread_mask, thread_mask,
1237 free_cpumask_var(thread_mask);
1243 if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
1244 count > efx_vf_size(efx)) {
1246 "Reducing number of RSS channels from %u to %u for "
1247 "VF support. Increase vf-msix-limit to use more "
1248 "channels on the PF.\n",
1249 count, efx_vf_size(efx));
1250 count = efx_vf_size(efx);
1257 efx_init_rx_cpu_rmap(
struct efx_nic *efx,
struct msix_entry *xentries)
1259 #ifdef CONFIG_RFS_ACCEL
1264 if (!efx->
net_dev->rx_cpu_rmap)
1267 rc = irq_cpu_rmap_add(efx->
net_dev->rx_cpu_rmap,
1268 xentries[i].vector);
1270 free_irq_cpu_rmap(efx->
net_dev->rx_cpu_rmap);
1282 static int efx_probe_interrupts(
struct efx_nic *efx)
1284 unsigned int max_channels =
1286 unsigned int extra_channels = 0;
1296 unsigned int n_channels;
1298 n_channels = efx_wanted_parallelism(efx);
1299 if (separate_tx_channels)
1301 n_channels += extra_channels;
1302 n_channels =
min(n_channels, max_channels);
1304 for (i = 0; i < n_channels; i++)
1305 xentries[i].
entry = i;
1309 "WARNING: Insufficient MSI-X vectors"
1310 " available (%d < %u).\n", rc, n_channels);
1312 "WARNING: Performance may be reduced.\n");
1321 if (n_channels > extra_channels)
1322 n_channels -= extra_channels;
1323 if (separate_tx_channels) {
1332 rc = efx_init_rx_cpu_rmap(efx, xentries);
1338 efx_get_channel(efx, i)->irq =
1344 "could not enable MSI-X\n");
1353 rc = pci_enable_msi(efx->
pci_dev);
1355 efx_get_channel(efx, 0)->irq = efx->
pci_dev->irq;
1358 "could not enable MSI\n");
1365 efx->
n_channels = 1 + (separate_tx_channels ? 1 : 0);
1381 efx_get_channel(efx, j)->type =
1394 static void efx_start_interrupts(
struct efx_nic *efx,
bool may_keep_eventq)
1405 if (!channel->
type->keep_eventq || !may_keep_eventq)
1406 efx_init_eventq(channel);
1407 efx_start_eventq(channel);
1413 static void efx_stop_interrupts(
struct efx_nic *efx,
bool may_keep_eventq)
1432 efx_stop_eventq(channel);
1433 if (!channel->
type->keep_eventq || !may_keep_eventq)
1434 efx_fini_eventq(channel);
1438 static void efx_remove_interrupts(
struct efx_nic *efx)
1449 efx->legacy_irq = 0;
1457 efx->tx_channel_offset =
1458 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1465 if (channel->
channel < efx->n_rx_channels)
1471 tx_queue->
queue -= (efx->tx_channel_offset *
1481 netif_dbg(efx, probe, efx->net_dev,
"creating NIC\n");
1484 rc = efx->type->probe(efx);
1490 rc = efx_probe_interrupts(efx);
1494 efx->type->dimension_resources(efx);
1496 if (efx->n_channels > 1)
1498 for (i = 0; i <
ARRAY_SIZE(efx->rx_indir_table); i++)
1499 efx->rx_indir_table[i] =
1500 ethtool_rxfh_indir_default(i, efx->rss_spread);
1502 efx_set_channels(efx);
1504 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1513 efx->type->remove(efx);
1517 static void efx_remove_nic(
struct efx_nic *efx)
1521 efx_remove_interrupts(efx);
1522 efx->
type->remove(efx);
1531 static int efx_probe_all(
struct efx_nic *efx)
1535 rc = efx_probe_nic(efx);
1541 rc = efx_probe_port(efx);
1557 "failed to create filter tables\n");
1561 rc = efx_probe_channels(efx);
1570 efx_remove_port(efx);
1572 efx_remove_nic(efx);
1584 static void efx_start_all(
struct efx_nic *efx)
1594 efx_start_port(efx);
1595 efx_start_datapath(efx);
1602 efx_monitor_interval);
1605 if (efx->
phy_op->poll(efx))
1610 efx->
type->start_stats(efx);
1616 static void efx_flush_all(
struct efx_nic *efx)
1630 static void efx_stop_all(
struct efx_nic *efx)
1638 efx->
type->stop_stats(efx);
1646 netif_tx_disable(efx->
net_dev);
1648 efx_stop_datapath(efx);
1651 static void efx_remove_all(
struct efx_nic *efx)
1653 efx_remove_channels(efx);
1655 efx_remove_port(efx);
1656 efx_remove_nic(efx);
/* Convert a requested IRQ moderation interval in microseconds into a
 * count of hardware timer ticks, given the hardware timer quantum in
 * nanoseconds.
 *
 * @usecs:      requested moderation interval; 0 means moderation disabled
 * @quantum_ns: duration of one hardware tick, in nanoseconds (non-zero)
 *
 * Returns 0 only when @usecs is 0; any non-zero request is clamped up to
 * at least 1 tick so that rounding can never silently disable moderation.
 * NOTE(review): the extracted fragment was missing the zero/clamp branches;
 * they are reconstructed here from the visible round-down guard — confirm
 * against the upstream driver.
 */
static unsigned int irq_mod_ticks(unsigned int usecs,
				  unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0; /* moderation disabled */
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round an enabled interval down to 0 */
	return usecs * 1000 / quantum_ns;
}
1676 unsigned int rx_usecs,
bool rx_adaptive,
1677 bool rx_may_override_tx)
1683 unsigned int tx_ticks;
1684 unsigned int rx_ticks;
1688 if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
1695 !rx_may_override_tx) {
1697 "RX and TX IRQ moderation must be equal\n");
1704 if (efx_channel_has_rx_queue(channel))
1706 else if (efx_channel_has_tx_queues(channel))
1714 unsigned int *rx_usecs,
bool *rx_adaptive)
1730 *tx_usecs = *rx_usecs;
1751 "hardware monitor executing on CPU %d\n",
1760 efx->
type->monitor(efx);
1765 efx_monitor_interval);
1779 struct efx_nic *efx = netdev_priv(net_dev);
1787 (data->
phy_id & 0xfc00) == 0x0400)
1799 static void efx_init_napi_channel(
struct efx_channel *channel)
1805 efx_poll, napi_weight);
1808 static void efx_init_napi(
struct efx_nic *efx)
1813 efx_init_napi_channel(channel);
1818 if (channel->napi_dev)
1820 channel->napi_dev =
NULL;
1823 static void efx_fini_napi(
struct efx_nic *efx)
1828 efx_fini_napi_channel(channel);
1837 #ifdef CONFIG_NET_POLL_CONTROLLER
1843 static void efx_netpoll(
struct net_device *net_dev)
1845 struct efx_nic *efx = netdev_priv(net_dev);
1849 efx_schedule_channel(channel);
1861 static int efx_net_open(
struct net_device *net_dev)
1863 struct efx_nic *efx = netdev_priv(net_dev);
1869 rc = efx_check_disabled(efx);
1890 static int efx_net_stop(
struct net_device *net_dev)
1892 struct efx_nic *efx = netdev_priv(net_dev);
1907 struct efx_nic *efx = netdev_priv(net_dev);
1912 efx->
type->update_stats(efx);
1942 static void efx_watchdog(
struct net_device *net_dev)
1944 struct efx_nic *efx = netdev_priv(net_dev);
1947 "TX stuck with port_enabled=%d: resetting channels\n",
1955 static int efx_change_mtu(
struct net_device *net_dev,
int new_mtu)
1957 struct efx_nic *efx = netdev_priv(net_dev);
1960 rc = efx_check_disabled(efx);
1971 net_dev->
mtu = new_mtu;
1972 efx->
type->reconfigure_mac(efx);
1979 static int efx_set_mac_address(
struct net_device *net_dev,
void *data)
1981 struct efx_nic *efx = netdev_priv(net_dev);
1983 char *new_addr = addr->
sa_data;
1985 if (!is_valid_ether_addr(new_addr)) {
1987 "invalid ethernet MAC address requested: %pM\n",
1997 efx->
type->reconfigure_mac(efx);
2004 static void efx_set_rx_mode(
struct net_device *net_dev)
2006 struct efx_nic *efx = netdev_priv(net_dev);
2016 memset(mc_hash, 0xff,
sizeof(*mc_hash));
2018 memset(mc_hash, 0x00,
sizeof(*mc_hash));
2022 __set_bit_le(bit, mc_hash);
2029 __set_bit_le(0xff, mc_hash);
2039 struct efx_nic *efx = netdev_priv(net_dev);
2049 .ndo_open = efx_net_open,
2050 .ndo_stop = efx_net_stop,
2051 .ndo_get_stats64 = efx_net_stats,
2052 .ndo_tx_timeout = efx_watchdog,
2055 .ndo_do_ioctl = efx_ioctl,
2056 .ndo_change_mtu = efx_change_mtu,
2057 .ndo_set_mac_address = efx_set_mac_address,
2058 .ndo_set_rx_mode = efx_set_rx_mode,
2059 .ndo_set_features = efx_set_features,
2060 #ifdef CONFIG_SFC_SRIOV
2066 #ifdef CONFIG_NET_POLL_CONTROLLER
2067 .ndo_poll_controller = efx_netpoll,
2070 #ifdef CONFIG_RFS_ACCEL
2071 .ndo_rx_flow_steer = efx_filter_rfs,
2075 static void efx_update_name(
struct efx_nic *efx)
2079 efx_set_channel_names(efx);
2087 if (net_dev->
netdev_ops == &efx_netdev_ops &&
2089 efx_update_name(netdev_priv(net_dev));
2095 .notifier_call = efx_netdev_event,
2106 static int efx_register_netdev(
struct efx_nic *efx)
2128 "aborting probe due to scheduled reset\n");
2136 efx_update_name(efx);
2156 "failed to init net dev attributes\n");
2157 goto fail_registered;
2164 unregister_netdevice(net_dev);
2172 static void efx_unregister_netdev(
struct efx_nic *efx)
2194 unregister_netdevice(efx->net_dev);
2212 efx_stop_interrupts(efx,
false);
2216 efx->phy_op->fini(efx);
2217 efx->type->fini(efx);
2231 rc = efx->
type->init(efx);
2241 rc = efx->
phy_op->init(efx);
2244 if (efx->
phy_op->reconfigure(efx))
2246 "could not restore PHY settings\n");
2249 efx->
type->reconfigure_mac(efx);
2251 efx_start_interrupts(efx,
false);
2285 rc = efx->
type->reset(efx, method);
2326 static void efx_reset_work(
struct work_struct *data)
2360 method = efx->
type->map_reset_reason(type);
2362 "scheduling %s reset for %s\n",
2419 static bool efx_port_dummy_op_poll(
struct efx_nic *efx)
2427 .poll = efx_port_dummy_op_poll,
2440 static int efx_init_struct(
struct efx_nic *efx,
2441 struct pci_dev *pci_dev,
struct net_device *net_dev)
2447 #ifdef CONFIG_SFC_MTD
2448 INIT_LIST_HEAD(&efx->mtd_list);
2461 efx->
phy_op = &efx_dummy_phy_operations;
2462 efx->
mdio.dev = net_dev;
2488 efx_fini_struct(efx);
2492 static void efx_fini_struct(
struct efx_nic *efx)
2514 static void efx_pci_remove_main(
struct efx_nic *efx)
2522 #ifdef CONFIG_RFS_ACCEL
2523 free_irq_cpu_rmap(efx->
net_dev->rx_cpu_rmap);
2526 efx_stop_interrupts(efx,
false);
2529 efx->
type->fini(efx);
2531 efx_remove_all(efx);
2537 static void efx_pci_remove(
struct pci_dev *pci_dev)
2541 efx = pci_get_drvdata(pci_dev);
2548 efx_stop_interrupts(efx,
false);
2552 efx_unregister_netdev(efx);
2556 efx_pci_remove_main(efx);
2561 efx_fini_struct(efx);
2562 pci_set_drvdata(pci_dev,
NULL);
2571 #define SFC_VPD_LEN 512
2572 static void efx_print_product_vpd(
struct efx_nic *efx)
2574 struct pci_dev *dev = efx->
pci_dev;
2580 vpd_size =
pci_read_vpd(dev, 0,
sizeof(vpd_data), vpd_data);
2581 if (vpd_size <= 0) {
2593 j = pci_vpd_lrdt_size(&vpd_data[i]);
2595 if (i + j > vpd_size)
2605 j = pci_vpd_info_field_size(&vpd_data[i]);
2607 if (i + j > vpd_size) {
2613 "Part Number : %.*s\n", j, &vpd_data[i]);
2620 static int efx_pci_probe_main(
struct efx_nic *efx)
2625 rc = efx_probe_all(efx);
2631 rc = efx->
type->init(efx);
2634 "failed to initialise NIC\n");
2638 rc = efx_init_port(efx);
2641 "failed to initialise port\n");
2648 efx_start_interrupts(efx,
false);
2655 efx->
type->fini(efx);
2658 efx_remove_all(efx);
2672 static int __devinit efx_pci_probe(
struct pci_dev *pci_dev,
2684 efx = netdev_priv(net_dev);
2697 pci_set_drvdata(pci_dev, efx);
2699 rc = efx_init_struct(efx, pci_dev, net_dev);
2704 "Solarflare NIC detected\n");
2706 efx_print_product_vpd(efx);
2709 rc = efx_init_io(efx);
2713 rc = efx_pci_probe_main(efx);
2717 rc = efx_register_netdev(efx);
2724 "SR-IOV can't be enabled rc %d\n", rc);
2734 "failed to create MTDs (%d)\n", rc);
2739 efx_pci_remove_main(efx);
2743 efx_fini_struct(efx);
2745 pci_set_drvdata(pci_dev,
NULL);
2752 static int efx_pm_freeze(
struct device *dev)
2764 efx_stop_interrupts(efx,
false);
2772 static int efx_pm_thaw(
struct device *dev)
2779 efx_start_interrupts(efx,
false);
2782 efx->
phy_op->reconfigure(efx);
2791 efx->
type->resume_wol(efx);
2802 static int efx_pm_poweroff(
struct device *dev)
2805 struct efx_nic *efx = pci_get_drvdata(pci_dev);
2807 efx->
type->fini(efx);
2816 static int efx_pm_resume(
struct device *dev)
2819 struct efx_nic *efx = pci_get_drvdata(pci_dev);
2833 rc = efx->
type->init(efx);
2840 static int efx_pm_suspend(
struct device *dev)
2845 rc = efx_pm_poweroff(dev);
2851 static const struct dev_pm_ops efx_pm_ops = {
2852 .suspend = efx_pm_suspend,
2853 .resume = efx_pm_resume,
2854 .freeze = efx_pm_freeze,
2855 .thaw = efx_pm_thaw,
2856 .poweroff = efx_pm_poweroff,
2857 .restore = efx_pm_resume,
2861 .name = KBUILD_MODNAME,
2862 .id_table = efx_pci_table,
2863 .probe = efx_pci_probe,
2864 .remove = efx_pci_remove,
2865 .driver.pm = &efx_pm_ops,
2876 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2878 static int __init efx_init_module(
void)
2893 if (!reset_workqueue) {
2898 rc = pci_register_driver(&efx_pci_driver);
2914 static void __exit efx_exit_module(
void)