#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/module.h>

#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

#define EFX_RX_FLUSH_COUNT 4
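/* The constants above size the TX/RX descriptor caches (entry count
 * plus its log2 order), bound how many internal errors are tolerated
 * before the NIC is disabled (EFX_MAX_INT_ERRORS within
 * EFX_INT_ERROR_EXPIRE seconds) and, per the flush logic further down,
 * give the depth of the RX flush request fifo (EFX_RX_FLUSH_COUNT).
 */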
#define _EFX_CHANNEL_MAGIC_TEST 0x000101
#define _EFX_CHANNEL_MAGIC_FILL 0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
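/* A magic event value is built as (code << 8) | data, where data is a
 * channel or queue index that must fit in the low 8 bits.  For
 * example, EFX_CHANNEL_MAGIC_TEST() for channel 2 encodes to
 * (0x000101 << 8) | 2 == 0x010102, and _EFX_CHANNEL_MAGIC_CODE()
 * recovers 0x000101 from it.
 */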
#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,

	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
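/* An oword (128 bits) is stored as two u64 halves; the comparison
 * XORs corresponding halves and masks the result, so it is true
 * exactly when a and b differ in a bit that mask covers.
 */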
	for (i = 0; i < n_regs; ++i) {
		mask = imask = regs[i].mask;

		efx_reado(efx, &original, address);

		for (j = 0; j < 128; j++) {

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))

		efx_writeo(efx, &original, address);
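/* Register self-test pattern: the original value is saved, a series of
 * test patterns (restricted to the register's writable mask) is
 * written and read back, any mismatch under the mask fails the test,
 * and the original value is restored afterwards.
 */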
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;

			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);

				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);

			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
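/* Each buffer table entry maps one 4KB page: FRF_AZ_BUF_ADR_FBUF takes
 * a page number, hence dma_addr >> 12.  The second write is the
 * teardown path, issuing a clear command over the entry range
 * [start, end].
 */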
static int efx_alloc_special_buffer(struct efx_nic *efx,

#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&

		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,

		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,

static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)

static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,

			     FRF_AZ_TX_DESC_WPTR, write_ptr);

efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)

	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)

	tx_queue->empty_read_count = 0;
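/* Descriptor push: empty_read_count snapshots the read pointer taken
 * when the queue last went empty (its top bits appear to carry a
 * validity flag).  A descriptor may only be pushed inline with the
 * doorbell when the queue was empty at that point, and the snapshot is
 * cleared once used so each empty transition is consumed at most once.
 */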
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);

				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);

		efx_notify_tx_desc(tx_queue);
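/* Either branch makes the new descriptors visible to the hardware:
 * the push path writes the first descriptor together with the write
 * pointer in one doorbell update, saving the NIC an initial descriptor
 * fetch, while efx_notify_tx_desc() only advances the write pointer
 * and lets the NIC fetch descriptors from the ring.
 */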
	struct efx_nic *efx = tx_queue->efx;

	return efx_alloc_special_buffer(efx, &tx_queue->txd,

	struct efx_nic *efx = tx_queue->efx;

	efx_init_special_buffer(efx, &tx_queue->txd);

			 FRF_AZ_TX_DESCQ_EN, 1,
			 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			 FRF_AZ_TX_DESCQ_EVQ_ID,
			 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			 FRF_AZ_TX_DESCQ_SIZE,
			 FRF_AZ_TX_DESCQ_TYPE, 0,
			 FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,

		__clear_bit_le(tx_queue->queue, &reg);

		__set_bit_le(tx_queue->queue, &reg);

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)

	struct efx_nic *efx = tx_queue->efx;

			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);

	struct efx_nic *efx = tx_queue->efx;

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,

	efx_fini_special_buffer(efx, &tx_queue->txd);

	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)

	rxd = efx_rx_desc(rx_queue, index);
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
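/* The programmed buffer size is rx_buf->len less the NIC revision's
 * rx_buffer_padding, presumably keeping the tail of the buffer as
 * headroom rather than DMA-visible space.
 */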
		  efx_rx_queue_index(rx_queue));

	return efx_alloc_special_buffer(efx, &rx_queue->rxd,

	bool iscsi_digest_en = is_b0;

		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	efx_init_special_buffer(efx, &rx_queue->rxd);

			 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			 FRF_AZ_RX_DESCQ_EVQ_ID,
			 efx_rx_queue_channel(rx_queue)->channel,
			 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			 FRF_AZ_RX_DESCQ_LABEL,
			 efx_rx_queue_index(rx_queue),
			 FRF_AZ_RX_DESCQ_SIZE,
			 FRF_AZ_RX_DESCQ_TYPE, 0,
			 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			 FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)

			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));

	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	efx_fini_special_buffer(efx, &rx_queue->rxd);

	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);

static bool efx_flush_wake(struct efx_nic *efx)

	efx->type->prepare_flush(efx);

		efx_flush_tx_queue(tx_queue);

	if (efx_sriov_enabled(efx)) {

		efx_flush_rx_queue(rx_queue);

	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,

	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;

			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
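/* This path injects a driver-generated event (one of the
 * _EFX_CHANNEL_MAGIC_* codes above) into a channel's event queue via
 * the driver-generated event register; it is how the self-test, refill
 * and drain signals handled later are delivered.
 */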
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;

		tx_queue = efx_channel_get_tx_queue(

		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &

		tx_queue = efx_channel_get_tx_queue(

		efx_notify_tx_desc(tx_queue);

			  "channel %d unexpected TX event "

	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

					      FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
					      FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
					      FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);

	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
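/* rx_ev_other_err collects every error flag except TOBE_DISC and
 * PAUSE, so the debug logging below only fires for genuinely
 * unexpected receive errors.
 */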
	else if (rx_ev_tobe_disc)

	if (rx_ev_ip_hdr_chksum_err)

	else if (rx_ev_tcp_udp_chksum_err)

		  " RX queue %d unexpected RX event "

		  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
		  rx_ev_ip_hdr_chksum_err ? " [IP_HDR_CHKSUM_ERR]" : "",
		  rx_ev_tcp_udp_chksum_err ? " [TCP_UDP_CHKSUM_ERR]" : "",
		  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
		  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
		  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
		  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
		  rx_ev_pause_frm ? " [PAUSE]" : "");
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)

	unsigned expected, dropped;

	dropped = (index - expected) & rx_queue->ptr_mask;

		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {

		flags = efx_handle_rx_not_ok(rx_queue, event);

	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =

		if (unlikely(!rx_ev_mcast_hash_match)) {

	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);

	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {

		efx_magic_event(tx_queue->channel,

		channel = efx_get_channel(efx, qid);
		if (!efx_channel_has_rx_queue(channel))

		rx_queue = efx_channel_get_rx_queue(channel);

			  "RXQ %d flush retry\n", qid);

		efx_magic_event(efx_rx_queue_channel(rx_queue),

	if (efx_flush_wake(efx))

efx_handle_drain_event(struct efx_channel *channel)

	if (efx_flush_wake(efx))

		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;

		efx_handle_drain_event(channel);

		efx_handle_drain_event(channel);
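/* Magic-event dispatch in efx_handle_generated_event(): the TEST code
 * records that the channel's event self-test fired, FILL triggers an
 * RX queue refill, and the RX/TX drain codes both funnel into
 * efx_handle_drain_event() to account one completed queue flush.
 */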
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	switch (ev_sub_code) {

			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);

			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);

			   "channel %d EVQ %d initialised\n",
			   channel->channel, ev_sub_data);

			   "channel %d SRAM update done\n", channel->channel);

			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);

			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);

			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);

			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data,

			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data,

			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
	unsigned int read_ptr;

		p_event = efx_event(channel, read_ptr);

		if (!efx_event_present(&event))

			efx_handle_rx_event(channel, &event);
			if (++spent == budget)

			tx_packets += efx_handle_tx_event(channel, &event);

			efx_handle_generated_event(channel, &event);

			efx_handle_driver_event(channel, &event);

			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))

				  "channel %d unknown event type %d (data "

	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));

	return efx_alloc_special_buffer(efx, &channel->eventq,

		  "channel %d event queue in special buffers %d-%d\n",

			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,

	efx_init_special_buffer(efx, &channel->eventq);

			 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,

	efx->type->push_irq_moderation(channel);

	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,

	efx_fini_special_buffer(efx, &channel->eventq);

	efx_free_special_buffer(channel->efx, &channel->eventq);
	efx_magic_event(efx_rx_queue_channel(rx_queue),

static inline void efx_nic_interrupts(struct efx_nic *efx,

			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);

	efx_nic_interrupts(efx, true, false);

	efx_nic_interrupts(efx, false, false);

	efx_nic_interrupts(efx, true, true);
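/* The three calls above cover the interrupt-control cases:
 * (true, false) enables interrupts, (false, false) disables them, and
 * (true, true) additionally sets FRF_AZ_KER_INT_KER to force a test
 * interrupt.
 */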
	int error, mem_perr;

		  error ? "disabling bus mastering" : "no recognised error");

			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",

	if (efx_nic_is_dual_func(efx))

			  "SYSTEM ERROR - reset scheduled\n");
1441 "SYSTEM ERROR - max number of errors seen."
1442 "NIC will be disabled\n");
		efx_schedule_channel_irq(channel);

		if (efx_event_present(event))
			efx_schedule_channel_irq(channel);

static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)

	efx_schedule_channel_irq(channel);

		handler = efx_legacy_interrupt;

			  "failed to hook legacy IRQ %d\n",

			  "failed to hook IRQ %d\n", channel->irq);
	unsigned vi_count, buftbl_min;

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		vf_limit = min(buftbl_free / entries_per_vf,

		if (efx->vf_count > vf_limit) {

				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;

		vi_count += efx->vf_count * efx_vf_size(efx);
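/* SR-IOV buffer table budgeting: after reserving buftbl_min for the
 * PF's own queues, the remaining SRAM (buftbl_free) divided by the
 * per-VF requirement gives vf_limit, the configured VF count is
 * clamped to what actually fits, and the surviving VFs' VIs are then
 * added to vi_count.
 */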
			     FRF_AZ_NORM_INT_VEC_DIS_KER,

			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);

			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);

			     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
			     FRF_BZ_TX_PACE_SB_AF, 0xb,
			     FRF_BZ_TX_PACE_FB_BASE, 0,

			     FRF_BZ_TX_PACE_BIN_TH,
#define REGISTER_REVISION_A 1
#define REGISTER_REVISION_B 2
#define REGISTER_REVISION_C 3
#define REGISTER_REVISION_Z 3

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
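/* Each REGISTER_<range>() entry records a register address plus the
 * first and last hardware revisions that implement it; e.g.
 * REGISTER_BZ(name) expands to { FR_BZ_##name, REGISTER_REVISION_B,
 * REGISTER_REVISION_Z }.  The dump code below skips entries outside
 * the running chip's revision range.
 */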
static const struct efx_nic_reg efx_nic_regs[] = {

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\

#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {

				efx_readd_table(efx, buf, table->offset, i);

				efx_reado_table(efx, buf, table->offset, i);

				efx_reado_table(efx, buf, table->offset, 2 * i);