#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
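/* Number of RX descriptors allocated and pushed in one refill batch. */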
#define EFX_RX_BATCH 8
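/* Largest buffer length that still lets two buffers plus the shared
 * efx_rx_page_state fit in a single page.
 */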
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
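/* Size of the skb header area allocated when building an skb from a
 * page-based buffer.
 */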
#define EFX_SKB_HEADERS 64u
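/* Allocation-method accounting: the channel's rx_alloc_level is pushed
 * up by RX_ALLOC_FACTOR_GRO for packets that GRO handles well and down
 * by RX_ALLOC_FACTOR_SKB otherwise; above RX_ALLOC_LEVEL_GRO the queue
 * refills with page-based buffers, and the level is clamped at
 * RX_ALLOC_LEVEL_MAX.
 */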
#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
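/* Percentage fill level below which new RX descriptors are added to
 * the descriptor ring (0 selects the driver default).
 */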
static unsigned int rx_refill_threshold;
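/* Descriptor-ring head room that must be kept free above the fill level. */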
#define EFX_RXD_HEAD_ROOM 2
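/* Offset of the Ethernet header within an RX buffer, skipping the
 * hardware hash prefix if one is present.
 */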
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
		efx->type->rx_buffer_hash_size;
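/* DMA-mapped size of a page-based receive buffer. */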
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
	return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
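/* The hardware hash, when present, sits immediately before the
 * Ethernet header, so it is read back relative to eh.
 */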
static inline u32 efx_rx_buf_hash(const u8 *eh)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
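/* skb allocation path: allocate a fresh skb per descriptor and record
 * it in the RX buffer.
 */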
	rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
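/* Page allocation path: map a whole page for DMA and carve it into
 * receive buffers.
 */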
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
			   efx_rx_buf_size(efx),
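/* Unmap a receive buffer; for shared pages the DMA mapping is only
 * torn down once the page state's refcount drops to zero.
 */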
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
	if (--state->refcnt == 0) {
			       efx_rx_buf_size(efx),
static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
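/* Final teardown of a buffer: unmap it, then free its skb or page. */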
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
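/* Reuse the still-unused half of a partially consumed page by turning
 * it into a new receive buffer on the same queue.
 */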
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
	unsigned fill_level, index;
	get_page(rx_buf->u.page);
	new_buf->len = rx_buf->len;
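/* Recycle a received buffer straight back onto the descriptor ring
 * when its page is still exclusively owned by the driver.
 */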
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);
	memcpy(new_buf, rx_buf, sizeof(*new_buf));
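/* Fast-fill path: top the descriptor ring back up in batches of
 * EFX_RX_BATCH, using either page or skb allocation depending on the
 * channel's current allocation method.
 */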
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	if (unlikely(fill_level < rx_queue->min_fill)) {
	space = rx_queue->max_fill - fill_level;
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
			rc = efx_init_rx_buffers_page(rx_queue);
			rc = efx_init_rx_buffers_skb(rx_queue);
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *leak_packet)
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
	if (likely(len <= max_len))
			  " RX queue %d seriously overlength "
			  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
			  efx_rx_queue_index(rx_queue), len, max_len,
			  efx->type->rx_buffer_padding);
			  " RX queue %d overlength RX event "
			  efx_rx_queue_index(rx_queue), len, max_len);
	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
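/* Hand a page-based buffer to the kernel's GRO layer, attaching the
 * page as an skb fragment and propagating the hardware RSS hash.
 */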
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
	struct page *page = rx_buf->u.page;
		skb->rxhash = efx_rx_buf_hash(eh);
		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
		skb_record_rx_queue(skb, channel->rx_queue.core_index);
	} else if (gro_result != GRO_DROP) {
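/* Per completion event: validate the reported length, recycle buffers
 * from discarded packets, and otherwise unmap the buffer, prefetch the
 * Ethernet header and trim the hash prefix from the stored length.
 */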
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	bool leak_packet = false;
	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		efx_recycle_rx_buffer(channel, rx_buf);
	efx_unmap_rx_buffer(efx, rx_buf);
	prefetch(efx_rx_buf_eh(efx, rx_buf));
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
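/* Pass a completed skb up the stack, either through the channel's own
 * receive_skb handler or the normal network stack receive path.
 */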
static void efx_rx_deliver(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf)
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, channel->rx_queue.core_index);
	if (channel->type->receive_skb)
		channel->type->receive_skb(channel, skb);
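/* Deferred handling of a held packet: build an skb from a page-based
 * buffer if needed, set the RSS hash, and choose between the GRO path
 * and plain delivery.
 */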
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);
		efx_free_rx_buffer(efx, rx_buf);
	skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb->rxhash = efx_rx_buf_hash(eh);
	skb_record_rx_queue(skb, channel->rx_queue.core_index);
	    !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, eh);
		efx_rx_deliver(channel, rx_buf);
	if (channel->type->receive_skb) {
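/* Queue creation: size the descriptor ring and allocate the software
 * buffer-tracking array alongside it.
 */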
670 "creating RX queue %d size %#x mask %#x\n",
675 rx_queue->
buffer = kcalloc(entries,
sizeof(*rx_queue->
buffer),
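/* Queue initialisation: compute the maximum fill level and the refill
 * trigger, honouring the rx_refill_threshold module parameter when it
 * is non-zero.
 */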
	unsigned int max_fill, trigger, max_trigger;
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}
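/* Queue shutdown: release every buffer still held on the ring. */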
727 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
737 for (i = 0; i <= rx_queue->
ptr_mask; i++) {
739 efx_fini_rx_buffer(rx_queue, rx_buf);
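/* Final teardown of the RX queue's remaining resources. */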
747 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
761 "RX descriptor ring refill threshold (%)");