10 #include <linux/module.h>
/*
 * Number of unsigned longs needed to hold a bitmap with one bit per
 * possible VI (virtual interface) index: there are at most
 * 1 << EFX_VI_SCALE_MAX VIs.
 */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)

/* Upper bound on the number of RX queues a single VF may use. */
#define VF_MAX_RX_QUEUES 63
/*
 * Buffer-table layout for one VF virtual interface (VI): each VI owns
 * EFX_VF_BUFTBL_PER_VI buffer-table entries starting at vf->buftbl_base.
 * The arithmetic below shows the order within a VI: the TX descriptor
 * ring first, then the RX ring, then the event queue; each descriptor
 * ring occupies EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE
 * buffer-table entries.
 */
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid)					\
	((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
/* RX ring starts one descriptor-ring's worth of entries after the TX ring. */
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
/* Event queue starts after both the TX and RX rings. */
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
/*
 * Build an all-ones mask as wide as a register field; requires a
 * companion _field##_WIDTH macro.  Use an unsigned constant so that a
 * field 31 bits wide does not left-shift into the sign bit (undefined
 * behaviour for signed int in C).
 */
#define EFX_FIELD_MASK(_field) \
	((1U << (_field ## _WIDTH)) - 1)
/*
 * NOTE(review): garbled extraction fragment — the module_param() lines
 * that bind these descriptions to their variables are missing here.
 */
/* Per-VF TX channel limit, tunable at module load; defaults to 2. */
176 static unsigned int vf_max_tx_channels = 2;
179 	"Limit the number of TX channels VFs can use");
/* -1 presumably means "no limit imposed by the driver" — TODO confirm. */
181 static int max_vfs = -1;
184 	"Reduce the number of VFs initialized by the driver");
/*
 * NOTE(review): fragment of an MCDI SR-IOV command wrapper (orig lines
 * ~198-225); most of the body is missing.  The visible code reads the
 * VF total and VI scale out of the MCDI response and stores them via
 * optional out-pointers (the indentation of the stores suggests they
 * are guarded by non-NULL checks — verify in the full source).
 */
198 				  unsigned *vi_scale_out,
unsigned *vf_total_out)
202 	unsigned vi_scale, vf_total;
217 	vf_total =
MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
218 	vi_scale =
MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
223 		*vi_scale_out = vi_scale;
225 		*vf_total_out = vf_total;
/*
 * NOTE(review): fragments of two functions.  The first enables/disables
 * user-level event delivery (USREV_DIS is the inverse of "enabled") and
 * points the default event queue at the VFDI channel.  The second
 * (orig ~245-261) appears to be a copy loop over `count` descriptors —
 * too little is visible to document further.
 */
235 			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
236 			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->
channel);
245 	u32 from_rid, from_hi, from_lo;
261 	while (count-- > 0) {
/*
 * NOTE(review): fragments of three helpers (orig ~309-395), each gutted
 * by the extraction.
 *
 * efx_sriov_reset_tx_filter(): re-installs the TX MAC filter for a VF;
 * skips VFs with an all-zero MAC address.
 */
309 static void efx_sriov_reset_tx_filter(
struct efx_vf *
vf)
324 	if (is_zero_ether_addr(vf->
addr.mac_addr))
337 	efx_filter_init_tx(&
filter, abs_index(vf, 0));
343 			  "Unable to migrate tx filter for vf %s\n",
/* efx_sriov_reset_rx_filter(): RX-side counterpart. */
353 static void efx_sriov_reset_rx_filter(
struct efx_vf *vf)
383 			  "Unable to insert rx filter for vf %s\n",
/*
 * __efx_sriov_update_vf_addr(): refresh both filters after a VF address
 * change.  Double-underscore prefix suggests the caller holds a lock —
 * TODO confirm against the full source.
 */
392 static void __efx_sriov_update_vf_addr(
struct efx_vf *vf)
394 	efx_sriov_reset_tx_filter(vf);
395 	efx_sriov_reset_rx_filter(vf);
/*
 * NOTE(review): fragment (orig ~404-468).  Pushes the shared VFDI status
 * page(s) to a VF by building an array of DMA copy requests (`copy`) and
 * issuing them via efx_sriov_memcpy(): entry [1] copies from the PF's
 * vfdi_status buffer; later entries come from extra status pages (epp).
 * The array is flushed when full (ARRAY_SIZE) and once more with the
 * trailing pos + 1 entries.
 */
404 static void __efx_sriov_push_vf_status(
struct efx_vf *vf)
420 	memset(copy,
'\0',
sizeof(copy));
432 	copy[1].from_rid = efx->
pci_dev->devfn;
433 	copy[1].from_addr = efx->vfdi_status.dma_addr +
data_offset;
450 		copy[
pos].from_addr = epp->
addr;
456 			efx_sriov_memcpy(efx, copy,
ARRAY_SIZE(copy));
468 	efx_sriov_memcpy(efx, copy, pos + 1);
/*
 * NOTE(review): fragment (orig ~480-491).  Programs NIC buffer-table
 * entries starting at `offset`; `addr[pos] >> 12` converts a DMA address
 * to a 4KiB-page frame number, with NULL `addr` writing zero entries
 * (i.e. clearing the table).
 */
480 static void efx_sriov_bufs(
struct efx_nic *efx,
unsigned offset,
488 				     FRF_AZ_BUF_ADR_REGION, 0,
490 				     addr ? addr[pos] >> 12 : 0,
491 				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
/*
 * NOTE(review): two small validators for VFDI request parameters
 * (fragments; braces lost to extraction).
 *
 * bad_vf_index(): true when a VF-relative queue index is out of range.
 */
497 static bool bad_vf_index(
struct efx_nic *efx,
unsigned index)
499 	return index >= efx_vf_size(efx);
/*
 * bad_buf_count(): true when the buffer count is not a power of two
 * (the & (count - 1) test) or exceeds the table capacity implied by
 * max_entry_count.  NOTE: buf_count == 0 passes the power-of-two test;
 * presumably rejected elsewhere — verify.
 */
502 static bool bad_buf_count(
unsigned buf_count,
unsigned max_entry_count)
504 	unsigned max_buf_count = max_entry_count *
507 	return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
/*
 * NOTE(review): fragment (orig ~513-527).  Maps an absolute VI index to
 * the owning VF and the VI index relative to that VF.  Visible logic:
 * vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(); indices past
 * vf_init_count are rejected; rel index is abs_index % efx_vf_size().
 * Return convention (true = bad?) is not visible here — confirm.
 */
513 static bool map_vi_index(
struct efx_nic *efx,
unsigned abs_index,
514 			 struct efx_vf **vf_out,
unsigned *rel_index_out)
520 	vf_i = (abs_index -
EFX_VI_BASE) / efx_vf_size(efx);
521 	if (vf_i >= efx->vf_init_count)
525 	*vf_out = efx->vf + vf_i;
527 		*rel_index_out = abs_index % efx_vf_size(efx);
/*
 * NOTE(review): fragment (orig ~531-565).  VFDI INIT_EVQ handler:
 * validates the VF-relative event-queue index and buffer count, loads
 * the guest-supplied pages into the buffer table, then programs the
 * event-queue registers (size encoded as __ffs(buf_count), consistent
 * with the power-of-two check in bad_buf_count()).
 */
531 static int efx_vfdi_init_evq(
struct efx_vf *vf)
536 	unsigned buf_count = req->
u.
init_evq.buf_count;
537 	unsigned abs_evq = abs_index(vf, vf_evq);
541 	if (bad_vf_index(efx, vf_evq) ||
545 			  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
550 	efx_sriov_bufs(efx, buftbl, req->
u.
init_evq.addr, buf_count);
553 			     FRF_CZ_TIMER_Q_EN, 1,
554 			     FRF_CZ_HOST_NOTIFY_MODE, 0,
559 			     FRF_AZ_EVQ_SIZE,
__ffs(buf_count),
560 			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
565 	       buf_count *
sizeof(
u64));
/*
 * NOTE(review): fragment (orig ~572-608).  VFDI INIT_RXQ handler:
 * validates the evq and rxq indices plus buffer count, loads the
 * descriptor-ring pages into the buffer table, then programs the RX
 * descriptor-queue pointer registers and enables the queue.
 */
572 static int efx_vfdi_init_rxq(
struct efx_vf *vf)
578 	unsigned buf_count = req->
u.
init_rxq.buf_count;
583 	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
588 			  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
589 			  "buf_count %d\n", vf->
pci_name, vf_rxq,
595 	efx_sriov_bufs(efx, buftbl, req->
u.
init_rxq.addr, buf_count);
599 			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
600 			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
601 			     FRF_AZ_RX_DESCQ_LABEL, label,
602 			     FRF_AZ_RX_DESCQ_SIZE,
__ffs(buf_count),
603 			     FRF_AZ_RX_DESCQ_JUMBO,
606 			     FRF_AZ_RX_DESCQ_EN, 1);
608 			 abs_index(vf, vf_rxq));
/*
 * NOTE(review): fragment (orig ~613-654).  VFDI INIT_TXQ handler:
 * like INIT_RXQ but additionally enforces the module-parameter limit
 * vf_max_tx_channels, and sets up TX pacing/filter fields
 * (TX_DPT_Q_MASK_WIDTH clamped via min(efx->vi_scale, 1U)).
 */
613 static int efx_vfdi_init_txq(
struct efx_vf *vf)
619 	unsigned buf_count = req->
u.
init_txq.buf_count;
621 	unsigned label, eth_filt_en;
624 	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
625 	    vf_txq >= vf_max_tx_channels ||
629 			  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
630 			  "buf_count %d\n", vf->
pci_name, vf_txq,
639 	efx_sriov_bufs(efx, buftbl, req->
u.
init_txq.addr, buf_count);
645 			     FRF_CZ_TX_DPT_Q_MASK_WIDTH,
min(efx->vi_scale, 1
U),
646 			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
647 			     FRF_AZ_TX_DESCQ_EN, 1,
648 			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
649 			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
650 			     FRF_AZ_TX_DESCQ_LABEL, label,
651 			     FRF_AZ_TX_DESCQ_SIZE,
__ffs(buf_count),
652 			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
654 			 abs_index(vf, vf_txq));
/*
 * NOTE(review): fragments of the queue-flush machinery (orig ~660-755).
 * efx_vfdi_flush_wake() is a wait-queue wakeup predicate and
 * efx_vfdi_flush_clear() resets per-VF flush tracking state; neither
 * body is visible here.
 */
660 static bool efx_vfdi_flush_wake(
struct efx_vf *vf)
669 static void efx_vfdi_flush_clear(
struct efx_vf *vf)
/*
 * efx_vfdi_fini_all_queues(): flushes every TX queue via register writes
 * and every RX queue via an MCDI call (the rxqs[] array of
 * little-endian queue ids), then waits for completion using
 * efx_vfdi_flush_wake() and clears the tracking state.
 */
679 static int efx_vfdi_fini_all_queues(
struct efx_vf *vf)
683 	unsigned count = efx_vf_size(efx);
686 	unsigned index, rxqs_count;
707 					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
708 					     FRF_AZ_TX_FLUSH_DESCQ,
713 			rxqs[rxqs_count++] =
cpu_to_le32(vf_offset + index);
719 			   rxqs_count *
sizeof(*rxqs),
NULL, 0,
NULL);
723 			   efx_vfdi_flush_wake(vf),
755 	efx_vfdi_flush_clear(vf);
/*
 * NOTE(review): fragments of four VFDI op handlers (orig ~762-846).
 *
 * efx_vfdi_insert_filter(): validates the target rxq/flags, then
 * re-applies the RX filter via efx_sriov_reset_rx_filter().
 */
762 static int efx_vfdi_insert_filter(
struct efx_vf *vf)
772 			  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
773 			  "flags 0x%x\n", vf->
pci_name, vf_rxq,
787 	efx_sriov_reset_rx_filter(vf);
/* efx_vfdi_remove_all_filters(): also funnels through the RX reset path. */
793 static int efx_vfdi_remove_all_filters(
struct efx_vf *vf)
796 	efx_sriov_reset_rx_filter(vf);
/*
 * efx_vfdi_set_status_page(): records where the VF wants its status DMAed
 * (page_count u64 page addresses), then pushes the current status.
 */
802 static int efx_vfdi_set_status_page(
struct efx_vf *vf)
815 			  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
834 		       page_count *
sizeof(
u64));
839 	__efx_sriov_push_vf_status(vf);
846 static int efx_vfdi_clear_status_page(
struct efx_vf *vf)
/*
 * NOTE(review): fragment of the VFDI request dispatcher (orig ~877-925).
 * Flow visible here: DMA the request structure from the VF's buffer into
 * the PF (copy[0]); on failure log and bail; otherwise dispatch through
 * the vfdi_ops[req->op] table (range check for the table is in a missing
 * line — verify); finally DMA the rc and a cleared op field back to the
 * VF so the guest sees completion.
 */
877 	memset(copy,
'\0',
sizeof(copy));
878 	copy[0].from_rid = vf->
pci_rid;
880 	copy[0].to_rid = efx->
pci_dev->devfn;
881 	copy[0].to_addr = vf->
buf.dma_addr;
883 	rc = efx_sriov_memcpy(efx, copy, 1);
888 			  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
895 		rc = vfdi_ops[req->
op](
vf);
898 				   "vfdi request %d from %s ok\n",
903 			  "ERROR: Unrecognised request %d from VF %s addr "
/* Write back rc and op so the VF can poll for completion. */
917 	memset(copy,
'\0',
sizeof(copy));
918 	copy[0].from_buf = &req->
rc;
921 	copy[0].length =
sizeof(req->
rc);
922 	copy[1].from_buf = &req->
op;
925 	copy[1].length =
sizeof(req->
op);
/*
 * NOTE(review): fragments of the VF reset path (orig ~941-1019).
 * Builds a reset event in a bounce buffer, DMA-broadcasts it to the VF's
 * status pages (copy_req loop: from_buf NULL means "use from_addr", the
 * buffer's DMA address), then reprograms the VF's event queue 0.
 */
941 	unsigned int pos,
count,
k, buftbl, abs_evq;
959 		memcpy(buffer->
addr + pos, &event,
sizeof(event));
964 	for (k = 0; k <
count; k++) {
965 		copy_req[
k].from_buf =
NULL;
966 		copy_req[
k].from_rid = efx->
pci_dev->devfn;
967 		copy_req[
k].from_addr = buffer->
dma_addr;
972 	rc = efx_sriov_memcpy(efx, copy_req, count);
976 			  "ERROR: Unable to notify %s of reset"
983 	abs_evq = abs_index(vf, 0);
988 			     FRF_CZ_TIMER_Q_EN, 1,
989 			     FRF_CZ_HOST_NOTIFY_MODE, 0,
995 			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
/* Workqueue wrapper: resets the VF from process context. */
1003 static void efx_sriov_reset_vf_work(
struct work_struct *work)
1010 	efx_sriov_reset_vf(vf, &buf);
/*
 * Called when no extra MSI-X vector is available for the VFDI channel.
 * NOTE(review): the split string concatenates to "...interruptvector..."
 * — a missing space in the log message; fix upstream, not here.
 */
1015 static void efx_sriov_handle_no_channel(
struct efx_nic *efx)
1018 		  "ERROR: IOV requires MSI-X and 1 additional interrupt"
1019 		  "vector. IOV disabled\n");
/*
 * NOTE(review): fragment of the VFDI channel-type descriptor plus the
 * SR-IOV probe routine (orig ~1036-1057).  Probe queries firmware via
 * efx_sriov_cmd(false, ...) for vi_scale and the VF count, then clamps
 * the count by the max_vfs module parameter (the clamp assignment is in
 * a missing line — verify).
 */
1036 	.handle_no_channel	= efx_sriov_handle_no_channel,
1037 	.pre_probe		= efx_sriov_probe_channel,
1039 	.get_name		= efx_sriov_get_channel_name,
1041 	.keep_eventq		=
true,
1051 	if (efx_sriov_cmd(efx,
false, &efx->vi_scale, &count))
1053 	if (count > 0 && count > max_vfs)
1057 	efx->vf_count =
count;
/*
 * NOTE(review): fragments of the peer-list rebuild worker and its
 * cleanup helper (orig ~1075-1169).  The worker moves the current
 * local-page list onto a private list (`pages`), walks every VF filling
 * in vfdi_status peer entries, allocates a fresh DMA page when the
 * current one fills up (peer_space hits 0), frees leftover pages, and
 * finally pushes the updated status to every VF.
 */
1075 	unsigned int peer_space;
1076 	unsigned int peer_count;
1082 	INIT_LIST_HEAD(&
pages);
1083 	list_splice_tail_init(&efx->local_page_list, &
pages);
1088 	peer = vfdi_status->
peers + 1;
1091 	for (pos = 0; pos < efx->vf_count; ++
pos) {
1110 			if (--peer_space == 0) {
1111 				if (list_empty(&
pages)) {
1116 						&efx->
pci_dev->dev, EFX_PAGE_SIZE,
1137 	while (!list_empty(&
pages)) {
1147 	for (pos = 0; pos < efx->vf_count; ++
pos) {
1152 			__efx_sriov_push_vf_status(vf);
/* Frees both the local-address and local-page lists. */
1157 static void efx_sriov_free_local(
struct efx_nic *efx)
1162 	while (!list_empty(&efx->local_addr_list)) {
1169 	while (!list_empty(&efx->local_page_list)) {
/*
 * NOTE(review): fragments of VF array setup/teardown (orig ~1179-1263).
 *
 * efx_sriov_vf_alloc(): per-VF initialisation loop over efx->vf_count.
 */
1179 static int efx_sriov_vf_alloc(
struct efx_nic *efx)
1188 	for (index = 0; index < efx->vf_count; ++
index) {
1189 		vf = efx->vf +
index;
/* efx_sriov_vfs_fini(): per-VF teardown loop. */
1206 static void efx_sriov_vfs_fini(
struct efx_nic *efx)
1211 	for (pos = 0; pos < efx->vf_count; ++
pos) {
/*
 * efx_sriov_vfs_init(): assigns each VF its buffer-table slice starting
 * from efx->vf_buftbl_base and formats the VF's PCI name
 * ("dddd:bb:dd.f"); unwinds via efx_sriov_vfs_fini() on failure.
 */
1223 static int efx_sriov_vfs_init(
struct efx_nic *efx)
1238 	buftbl_base = efx->vf_buftbl_base;
1240 	for (index = 0; index < efx->vf_count; ++
index) {
1241 		vf = efx->vf +
index;
1249 			 "%04x:%02x:%02x.%d",
1263 	efx_sriov_vfs_fini(efx);
/*
 * NOTE(review): fragment of the SR-IOV bring-up path (orig ~1270-1343).
 * Order visible here: bail if no VFs were probed; enable SR-IOV in
 * firmware (efx_sriov_cmd(true)); zero and populate the shared
 * vfdi_status structure; allocate the VF array; init the peer worker and
 * local lists; init per-VF state; record vf_init_count; enable user
 * events.  The tail is the goto-style unwind in reverse order, ending
 * with efx_sriov_cmd(false) to disable SR-IOV in firmware.
 */
1270 	struct vfdi_status *vfdi_status;
1278 	if (efx->vf_count == 0)
1281 	rc = efx_sriov_cmd(efx,
true,
NULL,
NULL);
1288 	vfdi_status = efx->vfdi_status.addr;
1289 	memset(vfdi_status, 0,
sizeof(*vfdi_status));
1291 	vfdi_status->
length =
sizeof(*vfdi_status);
1293 	vfdi_status->
vi_scale = efx->vi_scale;
1298 	rc = efx_sriov_vf_alloc(efx);
1303 	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
1304 	INIT_LIST_HEAD(&efx->local_addr_list);
1305 	INIT_LIST_HEAD(&efx->local_page_list);
1307 	rc = efx_sriov_vfs_init(efx);
1314 	efx->vf_init_count = efx->vf_count;
1317 	efx_sriov_usrev(efx,
true);
1326 		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
1327 		   efx->vf_count, efx_vf_size(efx));
/* Error unwind (labels lost to extraction): undo in reverse order. */
1331 	efx_sriov_usrev(efx,
false);
1333 	efx->vf_init_count = 0;
1335 	efx_sriov_vfs_fini(efx);
1338 	efx_sriov_free_local(efx);
1343 	efx_sriov_cmd(efx,
false,
NULL,
NULL);
/*
 * NOTE(review): fragment of SR-IOV teardown (orig ~1353-1378).  Mirrors
 * the init unwind: disable user events, clear vf_init_count, flush
 * per-VF work (the loop body is missing), free per-VF and local state,
 * and disable SR-IOV in firmware.  BUG_ON asserts the VFDI channel is
 * already quiesced.
 */
1353 	if (efx->vf_init_count == 0)
1357 	BUG_ON(efx->vfdi_channel->enabled);
1358 	efx_sriov_usrev(efx,
false);
1360 	efx->vf_init_count = 0;
1364 	for (pos = 0; pos < efx->vf_count; ++
pos) {
1374 	efx_sriov_vfs_fini(efx);
1375 	efx_sriov_free_local(efx);
1378 	efx_sriov_cmd(efx,
false,
NULL,
NULL);
/*
 * NOTE(review): fragments of several event/FLR/reset handlers
 * (orig ~1396-1558), each mostly missing.
 */
1396 		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
1397 		   qid, seq, type, data);
1399 	if (map_vi_index(efx, qid, &vf,
NULL))
1431 			  "ERROR: Screaming VFDI request from %s\n",
/*
 * FLR handler.  NOTE(review): `>` here where every sibling bound check
 * in this file uses `>=` (see the ndo_set_vf_* helpers below) — looks
 * like an off-by-one; verify against the full source before changing.
 */
1442 	if (vf_i > efx->vf_init_count)
1444 	vf = efx->vf + vf_i;
1449 	efx_vfdi_remove_all_filters(vf);
1450 	efx_vfdi_flush_clear(vf);
1457 	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
1459 	if (!efx->vf_init_count)
/* TX-flush-done path: map queue to VF, maybe wake the flush waiter. */
1472 	if (map_vi_index(efx, queue, &vf, &qid))
1481 	if (efx_vfdi_flush_wake(vf))
/* RX-flush-done path, including the RX_FLUSH_FAIL case. */
1492 				  FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1493 	if (map_vi_index(efx, queue, &vf, &qid))
1505 	if (efx_vfdi_flush_wake(vf))
/* Descriptor-fetch-error event: attribute the bad DMA queue to its VF. */
1515 	if (map_vi_index(efx, dmaq, &vf, &rel))
1520 		   "VF %d DMA Q %d reports descriptor fetch error.\n",
/* Full-NIC reset path: re-enable user events, reset every VF in turn. */
1534 	if (efx->vf_init_count == 0)
1537 	efx_sriov_usrev(efx,
true);
1543 	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
1544 		vf = efx->vf + vf_i;
1545 		efx_sriov_reset_vf(vf, &buf);
/* Module init: the VFDI workqueue allocation check. */
1558 	if (!vfdi_workqueue)
/*
 * NOTE(review): fragments of four netdev ndo_set_vf_* handlers
 * (orig ~1571-1639).  Each follows the same shape: look up the VF after
 * a bounds check (vf_i >= vf_init_count rejects), apply the setting
 * (missing lines), and for the first two refresh the VF's filters via
 * __efx_sriov_update_vf_addr().
 */
1571 	struct efx_nic *efx = netdev_priv(net_dev);
1574 	if (vf_i >= efx->vf_init_count)
1576 	vf = efx->vf + vf_i;
1580 	__efx_sriov_update_vf_addr(vf);
1589 	struct efx_nic *efx = netdev_priv(net_dev);
1593 	if (vf_i >= efx->vf_init_count)
1595 	vf = efx->vf + vf_i;
1600 	__efx_sriov_update_vf_addr(vf);
1609 	struct efx_nic *efx = netdev_priv(net_dev);
1613 	if (vf_i >= efx->vf_init_count)
1615 	vf = efx->vf + vf_i;
1633 	struct efx_nic *efx = netdev_priv(net_dev);
1637 	if (vf_i >= efx->vf_init_count)
1639 	vf = efx->vf + vf_i;