#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
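/* RX fragment allocation: each descriptor fragment either gets a freshly
 * mapped page or reuses the allocator's current page at the next offset. */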
			page_alloc[i].page = page;
			page_alloc[i].dma = dma;
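			/* Current page still has room: take another reference
			 * and advance the offset by one fragment stride. */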
			page_alloc[i].page = ring_alloc[i].page;
			get_page(ring_alloc[i].page);
			page_alloc[i].dma = ring_alloc[i].dma;
			page_alloc[i].offset = ring_alloc[i].offset +
					       frag_info->frag_stride;
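	/* Hand the previously posted pages to the completed descriptor and
	 * keep the replacements in the per-ring allocator. */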
		frags[i] = ring_alloc[i];
		ring_alloc[i] = page_alloc[i];
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
		if (!page_alloc->page)
		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
		       i, page_alloc->page);
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));
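/* Write byte counts for the used fragments and null out any descriptor
 * entries beyond priv->num_frags. */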
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].addr = 0;
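/* Post fresh fragments into one RX descriptor. */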
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index)
	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
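/* Fill every RX ring to its full size; if allocation stalls, shrink all
 * rings to the count that did succeed. */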
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];
			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				en_err(priv, "Failed to allocate enough rx buffers\n");
				en_warn(priv, "Only %d buffers allocated reducing ring size to %d",
					ring->actual_size, new_size);
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];
		mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
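/* Drop all descriptors still posted between consumer and producer. */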
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);
		en_err(priv, "Failed to map RX buffer\n");
	ring->buf = ring->wqres.buf.direct.buf;
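/* Ring activation: bind each ring to its CQ, init the descriptors and the
 * per-ring page allocator, then fill and publish the buffers. */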
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
		mlx4_en_update_rx_prod_db(ring);
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
		err = mlx4_en_init_allocator(priv, ring);
			en_err(priv, "Failed initializing ring allocator\n");
	err = mlx4_en_fill_rx_buffers(priv);
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];
		mlx4_en_update_rx_prod_db(ring);
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
	while (ring_ind >= 0) {
		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv, ring);
	mlx4_en_free_rx_buf(priv, ring);
	mlx4_en_destroy_allocator(priv, ring);
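/* Gather the completed DMA fragments into the skb's frag list; the last
 * fragment is trimmed so the total matches the received length. */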
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb, int length)
	for (nr = 0; nr < priv->num_frags; nr++) {
		get_page(frags[nr].page);
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
	skb_frag_size_set(&skb_frags_rx[nr - 1],
			  length - priv->frag_info[nr - 1].frag_prefix_size);
		__skb_frag_unref(&skb_frags_rx[nr]);
		skb_copy_to_linear_data(skb, va, length);
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		skb_shinfo(skb)->nr_frags = used_frags;
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
		if (mlx4_en_prepare_rx_desc(priv, ring, index))
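/* CQ polling loop: consume CQEs while their ownership bit matches the
 * current consumer index pass. */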
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			if (s_mac == priv->mac &&
				nr = mlx4_en_complete_rx_desc(priv,
					rx_desc, frags, gro_skb,
					length);
				skb_shinfo(gro_skb)->nr_frags = nr;
				__vlan_hwaccel_put_tag(gro_skb, vid);
				skb_record_rx_queue(gro_skb, cq->ring);
		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
			priv->stats.rx_dropped++;
		validate_loopback(priv, skb);
		skb_record_rx_queue(skb, cq->ring);
			mlx4_en_free_frag(priv, frags, nr);
		++cq->mcq.cons_index;
		cqe = &cq->buf[index];
		if (++polled == budget) {
	mlx4_cq_set_ci(&cq->mcq);
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	napi_schedule(&cq->napi);
	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d res:%d offset:%d\n",
	       stride, align, res, offset);
static int frag_sizes[] = {
	FRAG_SZ0, FRAG_SZ1, FRAG_SZ2, FRAG_SZ3
};
	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
			priv, priv->frag_info[i].frag_stride,
			priv->frag_info[i].frag_align);
	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d stride:%d last_offset:%d\n", i,
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
		en_err(priv, "Failed to allocate qp context\n");
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	mlx4_en_update_rx_prod_db(ring);
		en_err(priv, "Failed reserving drop qpn\n");
		en_err(priv, "Failed allocating drop qp\n");
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
		0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
		0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
	en_dbg(DRV, priv, "Configuring rss steering\n");
		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		en_err(priv, "Failed to allocate RSS indirection QP\n");
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, -1, &context);
	rss_rings = priv->prof->rss_rings;
	if (priv->mdev->profile.udp_rss) {
	rss_context->flags = rss_mask;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);
	for (i = 0; i < good_qps; i++) {