#ifdef CONFIG_IXGBE_DCB
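/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV with DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring register indices when both SR-IOV (VMDq pools)
 * and DCB traffic classes are enabled.  Each pool carries one queue per
 * traffic class, so a ring is bumped to the next pool boundary once its
 * index within the pool reaches the TC count.  FCoE rings that live outside
 * the pools are remapped to a dedicated block afterwards.
 **/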
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
        /* ... */
        u8 tcs = netdev_get_num_tc(adapter->netdev);
        /* ... */
        /* start at VMDq register offset for SR-IOV enabled setups */
        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
                /* If we are greater than indices move to next pool */
                if ((reg_idx & ~vmdq->mask) >= tcs)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
                adapter->rx_ring[i]->reg_idx = reg_idx;
        }
        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
        for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
                /* If we are greater than indices move to next pool */
                if ((reg_idx & ~vmdq->mask) >= tcs)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
                adapter->tx_ring[i]->reg_idx = reg_idx;
        }
#ifdef IXGBE_FCOE
        /* ... */
        if (fcoe->indices) {
                /* ... */
                for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
                        adapter->rx_ring[i]->reg_idx = reg_idx;
                        reg_idx++;
                }
                /* ... */
                for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
                        adapter->tx_ring[i]->reg_idx = reg_idx;
                        reg_idx++;
                }
        }

#endif /* IXGBE_FCOE */
        return true;
}
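
/**
 * ixgbe_get_first_reg_idx - Return the first Tx/Rx register index for a TC
 * @adapter: board private structure
 * @tc: traffic class to look up
 * @tx: returned first Tx descriptor register index for @tc
 * @rx: returned first Rx descriptor register index for @tc
 *
 * The base register index of a traffic class depends on the MAC type and on
 * how many TCs are configured, so the mapping is resolved per hardware
 * generation in the switch below.
 **/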
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
                                    unsigned int *tx, unsigned int *rx)
{
        struct net_device *dev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u8 num_tcs = netdev_get_num_tc(dev);

        *tx = 0;
        *rx = 0;

        switch (hw->mac.type) {
        /* ... */
        }
}
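
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Rings are laid out in traffic-class-major order: each TC owns a block of
 * rss_i contiguous rings, and each ring also records which TC it serves in
 * dcb_tc so the transmit path can honor class boundaries.
 **/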
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        unsigned int tx_idx, rx_idx;
        int tc, offset, rss_i, i;
        u8 num_tcs = netdev_get_num_tc(dev);

        /* verify we have DCB queueing enabled before proceeding */
        if (num_tcs <= 1)
                return false;

        rss_i = adapter->ring_feature[RING_F_RSS].indices;

        for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
                ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
                for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
                        adapter->tx_ring[offset + i]->reg_idx = tx_idx;
                        adapter->rx_ring[offset + i]->reg_idx = rx_idx;
                        adapter->tx_ring[offset + i]->dcb_tc = tc;
                        adapter->rx_ring[offset + i]->dcb_tc = tc;
                }
        }

        return true;
}

#endif
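/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the register offsets
 * of existing rings: they are packed into VMDq pools, and any FCoE rings
 * that follow the pools get a linear 1:1 block of registers.
 **/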
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
        /* ... */
        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
                /* ... */
                adapter->rx_ring[i]->reg_idx = reg_idx;
        }

#ifdef IXGBE_FCOE
        /* FCoE uses a linear block of queues so just assigning 1:1 */
        for (; i < adapter->num_rx_queues; i++, reg_idx++)
                adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
        /* ... */
        for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
                /* ... */
                adapter->tx_ring[i]->reg_idx = reg_idx;
        }

#ifdef IXGBE_FCOE
        for (; i < adapter->num_tx_queues; i++, reg_idx++)
                adapter->tx_ring[i]->reg_idx = reg_idx;

#endif
        return true;
}
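
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Plain RSS needs no remapping: ring i simply uses register index i.
 **/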
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i]->reg_idx = i;
        for (i = 0; i < adapter->num_tx_queues; i++)
                adapter->tx_ring[i]->reg_idx = i;

        return true;
}
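
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Try the most feature-rich mapping first and fall through in order of
 * decreasing precedence: SR-IOV with DCB, DCB, SR-IOV, and finally RSS.
 * Each helper returns true once it has claimed the configuration.
 **/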
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
        /* start with default case */
        adapter->rx_ring[0]->reg_idx = 0;
        adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
        if (ixgbe_cache_ring_dcb_sriov(adapter))
                return;

        if (ixgbe_cache_ring_dcb(adapter))
                return;

#endif
        if (ixgbe_cache_ring_sriov(adapter))
                return;

        ixgbe_cache_ring_rss(adapter);
}
#define IXGBE_RSS_16Q_MASK      0xF
#define IXGBE_RSS_8Q_MASK       0x7
#define IXGBE_RSS_4Q_MASK       0x3
#define IXGBE_RSS_2Q_MASK       0x1
#define IXGBE_RSS_DISABLED_MASK 0x0
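/*
 * These masks bound how many queue indices an RSS pool may use.
 * Illustrative example (not in the original source): with
 * IXGBE_RSS_4Q_MASK only the low two bits of a queue index select a
 * queue within a pool, so at most four RSS queues are addressable
 * per pool.
 */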
#ifdef CONFIG_IXGBE_DCB
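/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When VMDq and DCB are both active, each pool is sized to hold one queue
 * per traffic class, FCoE gets its own block of queues if any are
 * available, and each TC is then advertised to the stack as a single
 * netdev queue.
 **/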
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
        /* ... */
        u8 tcs = netdev_get_num_tc(adapter->netdev);
        /* ... */
        if (fcoe_i) {
                /* alloc queues for FCoE separately */
                fcoe->indices = fcoe_i;
                fcoe->offset = vmdq_i * tcs;
        } else if (tcs > 1) {
                /* use queue belonging to FcoE TC */
                fcoe->indices = 1;
                fcoe->offset = ixgbe_fcoe_get_tc(adapter);
        }
        /* ... */
        /* configure TC to queue mapping */
        for (i = 0; i < tcs; i++)
                netdev_set_tc_queue(adapter->netdev, i, 1, i);

        return true;
}
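
/**
 * ixgbe_set_dcb_queues - Allocate queues for a DCB-enabled device
 * @adapter: board private structure to initialize
 *
 * Pick the RSS width per traffic class (4, 8, or 16 queues per TC,
 * depending on MAC type and TC count), then register the TC-to-queue
 * layout with the stack via netdev_set_tc_queue().
 **/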
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
        /* ... */
        tcs = netdev_get_num_tc(dev);
        /* ... */
        } else if (tcs > 4) {
                /* 8 TC w/ 8 queues per TC */
                rss_i = min_t(u16, rss_i, 8);
                rss_m = IXGBE_RSS_8Q_MASK;
        }
        /* ... */
        /* configure TC to queue mapping */
        for (i = 0; i < tcs; i++)
                netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

        return true;
}

#endif
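/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * Choose between 64 pools of 2 queues and 32 pools of 4 queues based on
 * the requested pool and RSS counts, then carve out a separate FCoE block
 * after the pools when FCoE has queues of its own.
 **/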
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
        /* ... */
        /* 64 pool mode with 2 queues per pool */
        if ((vmdq_i > 32) || (rss_i < 4)) {
                /* ... */
        }
        /* ... */
        if (vmdq_i > 1 && fcoe_i) {
                /* alloc queues for FCoE separately */
                fcoe->indices = fcoe_i;
                fcoe->offset = vmdq_i * rss_i;
        }
        /* ... */
        return true;
}
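
/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * The default path when no advanced features are in use: size the Rx/Tx
 * queue counts from the RSS feature limit, with Flow Director and FCoE
 * adjustments where applicable.
 **/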
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
        /* ... */
}
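
/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * The top-level queue allocation routine.  Order matters: start with the
 * largest set of features enabled at once and fall back to the smallest,
 * mirroring ixgbe_cache_ring_register() above.
 **/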
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
        /* ... */
#ifdef CONFIG_IXGBE_DCB
        if (ixgbe_set_dcb_sriov_queues(adapter))
                return;

        if (ixgbe_set_dcb_queues(adapter))
                return;

#endif
        if (ixgbe_set_sriov_queues(adapter))
                return;

        ixgbe_set_rss_queues(adapter);
}
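
/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * pci_enable_msix() either succeeds, fails hard (negative return), or
 * reports how many vectors are actually available (positive return), in
 * which case the loop retries with that smaller count.  Dropping below
 * the minimum threshold disables MSI-X entirely.
 **/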
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                       int vectors)
{
        int err, vector_threshold;
        /* ... */
        while (vectors >= vector_threshold) {
                /* ... */
        }

        if (vectors < vector_threshold) {
                /* ... */
                netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
                             "Unable to allocate MSI-X interrupts\n");
                /* ... */
        }
}
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                                int v_count, int v_idx,
                                int txr_count, int txr_idx,
                                int rxr_count, int rxr_idx)
{
        /* ... */
        int ring_count, size;

        ring_count = txr_count + rxr_count;
        size = sizeof(struct ixgbe_q_vector) +
               (sizeof(struct ixgbe_ring) * ring_count);
        /* ... */
        /* allocate q_vector and rings */
        q_vector = kzalloc_node(size, GFP_KERNEL, node);
        if (!q_vector)
                q_vector = kzalloc(size, GFP_KERNEL);
        if (!q_vector)
                return -ENOMEM;
        /* ... */
        ring = q_vector->ring;

        while (txr_count) {
                /* assign generic ring traits */
                ring->dev = &adapter->pdev->dev;
                /* ... */
                /* update q_vector Tx values */
                ixgbe_add_ring(ring, &q_vector->tx);
                /* ... */
                /* assign ring to adapter */
                adapter->tx_ring[txr_idx] = ring;
                /* ... */
        }
        while (rxr_count) {
                /* assign generic ring traits */
                ring->dev = &adapter->pdev->dev;
                /* ... */
                /* update q_vector Rx values */
                ixgbe_add_ring(ring, &q_vector->rx);
                /* ... */
                if ((rxr_idx >= f->offset) &&
                    (rxr_idx < f->offset + f->indices))
                        set_bit(__IXGBE_RX_FCOE, &ring->state);
                /* ... */
                /* assign ring to adapter */
                adapter->rx_ring[rxr_idx] = ring;
                /* ... */
        }

        return 0;
}
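
/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * Clears the adapter's ring and vector pointers before freeing, so nothing
 * can reach the q_vector through the adapter struct once it is gone.
 **/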
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
        /* ... */
        adapter->q_vector[v_idx] = NULL;
        /* ... */
}
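
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * If there are at least as many vectors as rings, every ring gets its own
 * vector; otherwise the remaining rings are spread as evenly as possible
 * over the remaining vectors with DIV_ROUND_UP.  On failure all previously
 * allocated vectors are freed and the queue counts reset.
 **/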
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
        int q_vectors = adapter->num_q_vectors;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int rxr_idx = 0, txr_idx = 0, v_idx = 0;
        /* ... */
        if (q_vectors >= (rxr_remaining + txr_remaining)) {
                for (; rxr_remaining; v_idx++) {
                        err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
                                                   0, 0, 1, rxr_idx);
                        /* ... */
                }
        }

        for (; v_idx < q_vectors; v_idx++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
                err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
                                           tqpv, txr_idx,
                                           rqpv, rxr_idx);
                if (err)
                        goto err_out;

                /* update counts and index */
                rxr_remaining -= rqpv;
                txr_remaining -= tqpv;
                /* ... */
        }

        return 0;

err_out:
        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
        adapter->num_q_vectors = 0;

        while (v_idx--)
                ixgbe_free_q_vector(adapter, v_idx);

        return -ENOMEM;
}
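
/*
 * Illustrative example of the DIV_ROUND_UP split above (not from the
 * source): with 10 Rx and 10 Tx rings and 4 vectors, the per-vector ring
 * counts work out to 3, 3, 2, 2 for both Rx and Tx, since each step
 * divides what remains by the vectors still unassigned.
 */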
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
        int v_idx = adapter->num_q_vectors;
        /* ... */
        while (v_idx--)
                ixgbe_free_q_vector(adapter, v_idx);
}
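
/**
 * ixgbe_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure to initialize
 *
 * Releases whichever interrupt mode is active (MSI-X entries or MSI) so
 * the interrupt scheme can be renegotiated.
 **/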
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
        /* ... */
}
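
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt MSI-X first with a vector budget of roughly one vector per queue
 * pair, capped by the CPU count and the hardware maximum.  If MSI-X cannot
 * be had, features that need multiple queues (DCB, SR-IOV, RSS) are
 * disabled, the device drops to a single queue, and MSI is tried before
 * falling back to legacy interrupts.
 **/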
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
        /* ... */
        v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
        /* ... */
        if (adapter->msix_entries) {
                for (vector = 0; vector < v_budget; vector++)
                        adapter->msix_entries[vector].entry = vector;

                ixgbe_acquire_msix_vectors(adapter, v_budget);

                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                        return;
        }

        /* disable DCB if number of TCs exceeds 1 */
        if (netdev_get_num_tc(adapter->netdev) > 1) {
                e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
                netdev_reset_tc(adapter->netdev);
                /* ... */
                adapter->dcb_cfg.pfc_mode_enable = false;
        }
        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
        /* ... */
        ixgbe_set_num_queues(adapter);
        adapter->num_q_vectors = 1;

        err = pci_enable_msi(adapter->pdev);
        if (err) {
                netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
                             "Unable to allocate MSI interrupt, "
                             "falling back to legacy. Error: %d\n", err);
                return;
        }
        adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
        int err;

        /* Number of supported queues */
        ixgbe_set_num_queues(adapter);

        /* Set interrupt mode */
        ixgbe_set_interrupt_capability(adapter);

        err = ixgbe_alloc_q_vectors(adapter);
        if (err) {
                e_dev_err("Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }

        ixgbe_cache_ring_register(adapter);

        e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
                   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
                   adapter->num_rx_queues, adapter->num_tx_queues);
        /* ... */
        return 0;

err_alloc_q_vectors:
        ixgbe_reset_interrupt_capability(adapter);
        return err;
}
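
/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * Undoes ixgbe_init_interrupt_scheme(): frees the q_vectors and releases
 * the MSI-X or MSI resources.
 **/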
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
        /* ... */
        ixgbe_free_q_vectors(adapter);
        ixgbe_reset_interrupt_capability(adapter);
}
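
/**
 * ixgbe_tx_ctxtdesc - Write an advanced Tx context descriptor
 * @tx_ring: ring to write the descriptor to
 * @vlan_macip_lens: packed VLAN tag and MAC/IP header length fields
 * @fcoe_sof_eof: FCoE start/end of frame information
 * @type_tucmd: descriptor type and TUCMD command bits
 * @mss_l4len_idx: MSS, L4 length, and context index fields
 *
 * Fills in the next context descriptor on the ring so that subsequent
 * data descriptors can reference its offload settings.
 **/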
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
                       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
        /* ... */
}