#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
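/* Per-buffer completion: the PCI device is used to unmap the buffer's
 * DMA mapping, and completed packet/byte counts are accumulated for
 * the caller.
 */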
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
        struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
        (*bytes_compl) += buffer->skb->len;
                   "TX queue %d transmission id %x complete\n",
                   tx_queue->queue, tx_queue->read_count);
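/* efx_enqueue_skb_tso() is declared ahead of its use below.
 * efx_max_tx_len() bounds the length of a single TX descriptor; a
 * buffer that is not 16-byte aligned is limited further (512 bytes
 * minus the misalignment) to work around a hardware alignment
 * limitation.
 */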
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
                len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
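/* Stop the core TX queue once the fill level reaches the stop
 * threshold; if a re-read of the fill level shows it has dropped back
 * below the threshold, restart the queue immediately.
 */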
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;
        netif_tx_stop_queue(txq1->core_txq);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                netif_tx_start_queue(txq1->core_txq);
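/* Non-TSO transmit: map the skb head and each fragment for DMA,
 * splitting every mapping across descriptors as required by
 * efx_max_tx_len(); on a mapping error, report it and walk the insert
 * pointer back to release everything queued so far.
 */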
        struct efx_nic *efx = tx_queue->efx;
        unsigned int dma_len;
        unsigned short dma_flags;
        if (skb_shinfo(skb)->gso_size)
                return efx_enqueue_skb_tso(tx_queue, skb);
        len = skb_headlen(skb);
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
                        buffer->len = dma_len;
                        ++tx_queue->insert_count;
                if (i >= skb_shinfo(skb)->nr_frags)
                fragment = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(fragment);
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
        efx_tx_maybe_stop_queue(tx_queue);
                  " TX queue %d could not map skb with %d bytes %d "
                  "fragments for DMA\n", tx_queue->queue, skb->len,
                  skb_shinfo(skb)->nr_frags + 1);
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
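/* Complete buffers from read_count up to and including 'index',
 * reporting any unexpected entry as a spurious completion.
 */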
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;
        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        while (read_ptr != stop_index) {
                          "TX queue %d spurious TX completion id %x\n",
                          tx_queue->queue, read_ptr);
                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
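/* Transmit entry point: packets requesting a hardware timestamp are
 * checked for first; the skb's queue mapping then selects the
 * hardware TX queue.
 */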
        struct efx_nic *efx = netdev_priv(net_dev);
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
        index = skb_get_queue_mapping(skb);
        tx_queue = efx_get_tx_queue(efx, index, type);
        struct efx_nic *efx = tx_queue->efx;
                netdev_get_tx_queue(efx->net_dev,
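/* Reconfigure the number of traffic classes (TC) exposed by the net
 * device, adjusting its per-TC queue setup when the requested count
 * differs from the current one.
 */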
        struct efx_nic *efx = netdev_priv(net_dev);
        if (num_tc == net_dev->num_tc)
        for (tc = 0; tc < num_tc; tc++) {
        if (num_tc > net_dev->num_tc) {
                                            max_t(int, num_tc, 1) *
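/* TX completion handler: release completed buffers, report them to
 * the BQL layer via netdev_tx_completed_queue(), and wake the core
 * queue once this queue and its partner have drained below the wake
 * threshold.
 */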
        struct efx_nic *efx = tx_queue->efx;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
                txq2 = efx_tx_queue_partner(tx_queue);
                fill_level = max(tx_queue->insert_count - tx_queue->read_count,
                                 txq2->insert_count - txq2->read_count);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
                        tx_queue->empty_read_count =
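/* TSO headers are built in small fixed-size buffers carved out of
 * per-queue pages; efx_tsoh_page_count() works out how many pages a
 * queue needs for them.
 */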
#define TSOH_STD_SIZE   128
#define TSOH_PER_PAGE   (PAGE_SIZE / TSOH_STD_SIZE)

static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
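/* Queue setup and teardown: allocate the software descriptor ring and
 * TSO header pages, reset the ring counters, release any buffers
 * still outstanding, and finally free everything again.
 */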
        struct efx_nic *efx = tx_queue->efx;
                  "creating TX queue %d size %#x mask %#x\n",
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                        kcalloc(efx_tsoh_page_count(tx_queue),
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);
        tx_queue->insert_count = 0;
        tx_queue->read_count = 0;
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                ++tx_queue->read_count;
        netdev_tx_reset_queue(tx_queue->core_txq);
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
                for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
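/* Software TSO: a packet with a GSO size is split into MSS-sized
 * segments, each transmitted with its own freshly built copy of the
 * headers placed in a DMA-able header buffer.
 */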
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET     0
#else
#define TSOH_OFFSET     NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
                             + (tcp_hdr(skb)->doff << 2u)) >
                        (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
                result = (u8 *)page_buf->addr + offset;
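/* Push a DMA-mapped region onto the TX ring, consuming as many
 * descriptors as efx_max_tx_len() dictates for its length.
 */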
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
        struct efx_nic *efx = tx_queue->efx;
        unsigned dma_len, insert_ptr;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;
                                    tx_queue->read_count >=
                dma_len = efx_max_tx_len(efx, dma_addr);
                buffer->len = dma_len;
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
        ++tx_queue->insert_count;
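/* Undo a partially built transmit: walk the insert pointer back to
 * the last written position, releasing each buffer on the way.
 */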
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
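/* TSO state handling: record the IP and TCP header offsets within the
 * skb and track how much of the current input fragment remains to be
 * consumed.
 */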
        st->ip_off = skb_network_header(skb) - skb->data;
        st->tcp_off = skb_transport_header(skb) - skb->data;
        st->in_len = skb_frag_size(frag);
        int len = skb_headlen(skb) - hl;
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
        efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
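/* Start a new output segment: build its headers in a TSO header
 * buffer, advance the TCP sequence number by gso_size, and carry the
 * FIN/PSH flags only on the final segment.
 */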
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
        header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        rc = efx_tso_put_header(tx_queue, buffer, header);
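/* Top-level TSO enqueue: walk the skb's payload, starting a new
 * output segment whenever the current one fills up; on a header
 * allocation or DMA mapping failure, log the error and unwind the
 * queue.
 */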
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        struct efx_nic *efx = tx_queue->efx;
        state.protocol = efx_tso_check_protocol(skb);
        tso_start(&state, skb);
        if (skb_headlen(skb) == state.header_len) {
                rc = tso_get_fragment(&state, efx,
                                      skb_shinfo(skb)->frags + frag_i);
                rc = tso_get_head_fragment(&state, efx, skb);
        if (tso_start_new_packet(tx_queue, skb, &state) < 0)
                tso_fill_packet_with_fragment(tx_queue, skb, &state);
                if (state.in_len == 0) {
                        if (++frag_i >= skb_shinfo(skb)->nr_frags)
                        rc = tso_get_fragment(&state, efx,
                                              skb_shinfo(skb)->frags + frag_i);
                if (state.packet_space == 0 &&
                    tso_start_new_packet(tx_queue, skb, &state) < 0)
        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
        efx_tx_maybe_stop_queue(tx_queue);
                  "Out of memory for TSO headers, or DMA mapping error\n");
        if (state.unmap_len) {
        efx_enqueue_unwind(tx_queue);