#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
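/*
 * Note: skb->cb is reused below as scratch space to hold the PKO gather
 * list when an skb carries page fragments (see the nr_frags handling in
 * the transmit path).
 */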
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
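	/*
	 * Sketch of the accounting, inferred from the surrounding code: the
	 * PKO hardware decrements the per-queue FAU register (named later via
	 * pko_command.s.reg0) for every "dontfree" packet it finishes
	 * sending, so the register holds minus the number of completed but
	 * not yet freed skbs.  Callers fetch-and-add MAX_SKB_TO_FREE to claim
	 * a batch of completions, and the adjustment above rolls back
	 * whatever part of that claim was not actually covered.
	 */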
static void cvm_oct_kick_tx_poll_watchdog(void)
	ciu_timx.s.one_shot = 1;
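	/*
	 * Arm a one-shot CIU timer as a TX cleanup watchdog so completed
	 * skbs still get freed even if no further packets are transmitted.
	 */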
	int qos, queues_per_port;
	int total_remaining = 0;

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	for (qos = 0; qos < queues_per_port; qos++) {
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			while (skb_to_free > 0) {
				t->next = to_free_list;
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
			while (to_free_list) {
				to_free_list = to_free_list->next;
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	if (total_freed >= 0 && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	cvm_oct_kick_tx_poll_watchdog();
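/*
 * The fragments that follow are from the main transmit path
 * (cvm_oct_xmit() in this file): it builds a PKO command word and buffer
 * descriptor for the skb and queues the packet to the packet output (PKO)
 * hardware.
 */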
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;

		else if (qos >= cvmx_pko_get_num_queues(priv->port))

	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
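			/*
			 * The skb is linearized above because, as I read the
			 * code, the gather list kept in skb->cb (see
			 * CVM_OCT_SKB_CB) only has room for the linear head
			 * plus five fragment pointers.
			 */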
				skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);

		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		if (gmx_prt_cfg.s.duplex == 0) {
			int add_bytes = 64 - skb->len;
			if ((skb_tail_pointer(skb) + add_bytes) <=
			    skb_end_pointer(skb))
				memset(__skb_put(skb, add_bytes), 0,
	pko_command.s.n2 = 1;
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.subone0 = 1;
	pko_command.s.dontfree = 1;
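	/*
	 * dontfree starts out set (the core frees the skb once the FAU
	 * counter says transmission finished); it is cleared further down if
	 * the skb can instead be handed to the FPA pool and recycled.
	 */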
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;

		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			hw_buffer.s.size = fs->size;
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
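/*
 * The REUSE_SKBUFFS_WITHOUT_FREE block below tries to hand the skb's data
 * buffer straight to the FPA pool instead of freeing it: a series of
 * checks (shared, cloned, fragmented, wrong truesize, ...) each bail out
 * to dont_put_skbuff_in_hw, and only if they all pass is dontfree cleared
 * and the skb pointer stashed just in front of fpa_head so the RX path
 * can recover the skb when the hardware reuses the buffer.
 */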
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
		goto dont_put_skbuff_in_hw;
		goto dont_put_skbuff_in_hw;
		goto dont_put_skbuff_in_hw;
		goto dont_put_skbuff_in_hw;
	if (unlikely(skb_header_cloned(skb))) {
		goto dont_put_skbuff_in_hw;
		goto dont_put_skbuff_in_hw;
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		goto dont_put_skbuff_in_hw;
	     sizeof(*skb) + skb_end_offset(skb))) {
		goto dont_put_skbuff_in_hw;
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	skb_dst_set(skb, NULL);
	secpath_put(skb->sp);

#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT

dont_put_skbuff_in_hw:
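	/*
	 * Hardware TCP/UDP checksumming is only used for simple IPv4 packets
	 * (ihl == 5, not fragmented); ipoffp1 is the offset of the IP header
	 * plus one, counted from the start of the frame.
	 */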
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;

		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
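	/*
	 * Throttle, as I understand it: buffers_to_free tracks the FPA packet
	 * buffer pool, and once it has gone far negative (buffers are being
	 * consumed faster than the receive side returns them) the driver
	 * falls back to core-freed skbs instead of letting the hardware free
	 * the buffer.
	 */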
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		pko_command.s.reg0 = priv->fau + qos * 4;
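		/*
		 * reg0 names the FAU register that PKO decrements (together
		 * with subone0 above) once the packet has gone out; that is
		 * how cvm_oct_free_tx_skbs() later learns this skb may be
		 * freed.
		 */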
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
			netif_stop_queue(dev);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,

					pko_command, hw_buffer,

		skb->next = to_free_list;
		priv->stats.tx_dropped++;

	while (skb_to_free > 0) {
		t->next = to_free_list;
	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	while (to_free_list) {
		to_free_list = to_free_list->next;

	if (total_to_clean & 0x3ff) {
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);

	cvm_oct_kick_tx_poll_watchdog();
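/*
 * cvm_oct_xmit_pow() below is the alternate transmit path used for the
 * POW-only interface: rather than going through PKO, the packet is copied
 * into a freshly allocated work queue entry and packet buffer and
 * submitted to the POW scheduler, so a core can process it as if it had
 * arrived from the wire.
 */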
				   "queue entry\n", dev->name);
		priv->stats.tx_dropped++;

		priv->stats.tx_dropped++;

	copy_location = packet_buffer + sizeof(uint64_t);

	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;

	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
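	/*
	 * packet_ptr.s.back is the distance from the packet data back to the
	 * start of the FPA buffer in 128-byte cache lines (hence the >> 7),
	 * which the hardware needs when it eventually frees the buffer.
	 */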
		work->word2.s.ip_offset = 14;
		work->word2.s.vlan_valid = 0;
		work->word2.s.vlan_cfi = 0;
		work->word2.s.vlan_id = 0;
		work->word2.s.dec_ipcomp = 0;
		work->word2.s.tcp_or_udp =
		work->word2.s.dec_ipsec = 0;
		work->word2.s.is_v6 = 0;
		work->word2.s.software = 0;
		work->word2.s.L4_error = 0;
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
		work->word2.s.IP_exc = 0;
		work->word2.s.not_IP = 0;
		work->word2.s.rcv_error = 0;
		work->word2.s.err_code = 0;
		       sizeof(work->packet_data));
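		/*
		 * For non-IP packets the "snoip" (not IP) layout of word2 is
		 * filled in instead, mirroring what the RX hardware would
		 * have reported for such a frame.
		 */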
		work->word2.snoip.vlan_valid = 0;
		work->word2.snoip.vlan_cfi = 0;
		work->word2.snoip.vlan_id = 0;
		work->word2.snoip.software = 0;
		work->word2.snoip.is_bcast =
		work->word2.snoip.is_mcast =
		work->word2.snoip.not_IP = 1;
		work->word2.snoip.rcv_error = 0;
		work->word2.snoip.err_code = 0;
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));

	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
	priv->stats.tx_packets++;
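/*
 * The loop below is from the per-device TX shutdown helper: it walks all
 * 16 per-qos free lists and releases any skbs still waiting to be freed.
 */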
	for (qos = 0; qos < 16; qos++) {
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
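/*
 * cvm_oct_tx_do_cleanup() is the tasklet body scheduled above: it runs
 * the free-list cleanup (cvm_oct_free_tx_skbs()) for every registered
 * device.  The tasklet_schedule() call that follows appears to be from
 * the watchdog interrupt handler, which simply defers the work here.
 */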
static void cvm_oct_tx_do_cleanup(unsigned long arg)

	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
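/*
 * Presumably from cvm_oct_tx_initialize(): the argument fragment below is
 * part of a request_irq() call that installs cvm_oct_tx_cleanup_watchdog
 * as the handler for the CIU timer interrupt used as the TX cleanup
 * watchdog.
 */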
			cvm_oct_tx_cleanup_watchdog, 0,