sge.c
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses. You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  * Redistribution and use in source and binary forms, with or
13  * without modification, are permitted provided that the following
14  * conditions are met:
15  *
16  * - Redistributions of source code must retain the above
17  * copyright notice, this list of conditions and the following
18  * disclaimer.
19  *
20  * - Redistributions in binary form must reproduce the above
21  * copyright notice, this list of conditions and the following
22  * disclaimer in the documentation and/or other materials
23  * provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/ipv6.h>
45 #include <net/tcp.h>
46 #include "cxgb4.h"
47 #include "t4_regs.h"
48 #include "t4_msg.h"
49 #include "t4fw_api.h"
50 
51 /*
52  * Rx buffer size. We use largish buffers if possible but settle for single
53  * pages under memory shortage.
54  */
55 #if PAGE_SHIFT >= 16
56 # define FL_PG_ORDER 0
57 #else
58 # define FL_PG_ORDER (16 - PAGE_SHIFT)
59 #endif
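
/*
 * Worked example (illustrative, not part of the original source): with the
 * common PAGE_SHIFT of 12 (4KB pages), FL_PG_ORDER is 16 - 12 = 4, so a
 * "large" Free List buffer is PAGE_SIZE << 4 = 64KB.  With 64KB pages
 * (PAGE_SHIFT >= 16) the order is 0 and a single page already suffices.
 */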
60 
61 /* RX_PULL_LEN should be <= RX_COPY_THRES */
62 #define RX_COPY_THRES 256
63 #define RX_PULL_LEN 128
64 
65 /*
66  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
67  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
68  */
69 #define RX_PKT_SKB_LEN 512
70 
71 /*
72  * Max number of Tx descriptors we clean up at a time. Should be modest as
73  * freeing skbs isn't cheap and it happens while holding locks. As long as we
74  * free packets faster than they arrive, we eventually catch up and keep
75  * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
76  */
77 #define MAX_TX_RECLAIM 16
78 
79 /*
80  * Max number of Rx buffers we replenish at a time. Again keep this modest,
81  * allocating buffers isn't cheap either.
82  */
83 #define MAX_RX_REFILL 16U
84 
85 /*
86  * Period of the Rx queue check timer. This timer is infrequent as it has
87  * something to do only when the system experiences severe memory shortage.
88  */
89 #define RX_QCHECK_PERIOD (HZ / 2)
90 
91 /*
92  * Period of the Tx queue check timer.
93  */
94 #define TX_QCHECK_PERIOD (HZ / 2)
95 
96 /*
97  * Max number of Tx descriptors to be reclaimed by the Tx timer.
98  */
99 #define MAX_TIMER_TX_RECLAIM 100
100 
101 /*
102  * Timer index used when backing off due to memory shortage.
103  */
104 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
105 
106 /*
107  * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
108  * attempt to refill it.
109  */
110 #define FL_STARVE_THRES 4
111 
112 /*
113  * Suspend an Ethernet Tx queue with fewer available descriptors than this.
114  * This is the same as calc_tx_descs() for a TSO packet with
115  * nr_frags == MAX_SKB_FRAGS.
116  */
117 #define ETHTXQ_STOP_THRES \
118  (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
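
/*
 * Worked example (illustrative): with the common 4KB-page value of
 * MAX_SKB_FRAGS == 17 this evaluates to 1 + DIV_ROUND_UP(25 + 1, 8) = 5
 * descriptors, which indeed equals calc_tx_descs() below for a TSO packet
 * with a linear part plus 17 page fragments:
 * flits_to_desc(sgl_len(18) + 4 + 2) = DIV_ROUND_UP(34, 8) = 5.
 */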
119 
120 /*
121  * Suspension threshold for non-Ethernet Tx queues. We require enough room
122  * for a full sized WR.
123  */
124 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
125 
126 /*
127  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
128  * into a WR.
129  */
130 #define MAX_IMM_TX_PKT_LEN 128
131 
132 /*
133  * Max size of a WR sent through a control Tx queue.
134  */
135 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
136 
137 struct tx_sw_desc { /* SW state per Tx descriptor */
138  struct sk_buff *skb;
139  struct ulptx_sgl *sgl;
140 };
141 
142 struct rx_sw_desc { /* SW state per Rx descriptor */
143  struct page *page;
144  dma_addr_t dma_addr;
145 };
146 
147 /*
148  * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
149  * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
150  * We could easily support more but there doesn't seem to be much need for
151  * that ...
152  */
153 #define FL_MTU_SMALL 1500
154 #define FL_MTU_LARGE 9000
155 
156 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
157  unsigned int mtu)
158 {
159  struct sge *s = &adapter->sge;
160 
161  return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
162 }
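
/*
 * Numeric example (illustrative; pktshift and fl_align are read from the
 * hardware at init time): assuming a typical pktshift of 2 and fl_align of
 * 32, the small buffer is ALIGN(2 + 14 + 4 + 1500, 32) = ALIGN(1520, 32) =
 * 1536 bytes.
 */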
163 
164 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
165 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
166 
167 /*
168  * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
169  * these to specify the buffer size as an index into the SGE Free List Buffer
170  * Size register array. We also use bit 4 to mark when a buffer has been
171  * unmapped for DMA; this is of course never sent to the hardware and is only
172  * used to prevent double unmappings. All of the above requires that the Free
173  * List Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
174  * 32-byte aligned or a power of 2 greater in alignment. Since the SGE's
175  * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
176  */
177 enum {
178  RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
179  RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
180  RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
181 
182  /*
183  * XXX We shouldn't depend on being able to use these indices.
184  * XXX Especially when some other Master PF has initialized the
185  * XXX adapter or we use the Firmware Configuration File. We
186  * XXX should really search through the Host Buffer Size register
187  * XXX array for the appropriately sized buffer indices.
188  */
189  RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
190  RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
191 
192  RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
193  RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
194 };
195 
196 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
197 {
198  return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
199 }
200 
201 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
202 {
203  return !(d->dma_addr & RX_UNMAPPED_BUF);
204 }
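
/*
 * Sketch of the encode side (illustrative helper, not used by the driver;
 * refill_fl() below ORs the size index into the mapping inline):
 */
static inline dma_addr_t __maybe_unused encode_buf_addr(dma_addr_t addr,
							unsigned int size_idx)
{
	/* addr must be at least 32-byte aligned so bits 0..4 are free */
	return addr | (size_idx & RX_BUF_SIZE);
}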
205 
213 static inline unsigned int txq_avail(const struct sge_txq *q)
214 {
215  return q->size - 1 - q->in_use;
216 }
217 
226 static inline unsigned int fl_cap(const struct sge_fl *fl)
227 {
228  return fl->size - 8; /* 1 descriptor = 8 buffers */
229 }
230 
231 static inline bool fl_starving(const struct sge_fl *fl)
232 {
233  return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
234 }
235 
236 static int map_skb(struct device *dev, const struct sk_buff *skb,
237  dma_addr_t *addr)
238 {
239  const skb_frag_t *fp, *end;
240  const struct skb_shared_info *si;
241 
242  *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
243  if (dma_mapping_error(dev, *addr))
244  goto out_err;
245 
246  si = skb_shinfo(skb);
247  end = &si->frags[si->nr_frags];
248 
249  for (fp = si->frags; fp < end; fp++) {
250  *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
251  DMA_TO_DEVICE);
252  if (dma_mapping_error(dev, *addr))
253  goto unwind;
254  }
255  return 0;
256 
257 unwind:
258  while (fp-- > si->frags)
259  dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
260 
261  dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
262 out_err:
263  return -ENOMEM;
264 }
265 
266 #ifdef CONFIG_NEED_DMA_MAP_STATE
267 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
268  const dma_addr_t *addr)
269 {
270  const skb_frag_t *fp, *end;
271  const struct skb_shared_info *si;
272 
273  dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
274 
275  si = skb_shinfo(skb);
276  end = &si->frags[si->nr_frags];
277  for (fp = si->frags; fp < end; fp++)
278  dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
279 }
280 
289 static void deferred_unmap_destructor(struct sk_buff *skb)
290 {
291  unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
292 }
293 #endif
294 
295 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
296  const struct ulptx_sgl *sgl, const struct sge_txq *q)
297 {
298  const struct ulptx_sge_pair *p;
299  unsigned int nfrags = skb_shinfo(skb)->nr_frags;
300 
301  if (likely(skb_headlen(skb)))
302  dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
303  DMA_TO_DEVICE);
304  else {
305  dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
306  DMA_TO_DEVICE);
307  nfrags--;
308  }
309 
310  /*
311  * the complexity below is because of the possibility of a wrap-around
312  * in the middle of an SGL
313  */
314  for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
315  if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
316 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
317  ntohl(p->len[0]), DMA_TO_DEVICE);
318  dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
319  ntohl(p->len[1]), DMA_TO_DEVICE);
320  p++;
321  } else if ((u8 *)p == (u8 *)q->stat) {
322  p = (const struct ulptx_sge_pair *)q->desc;
323  goto unmap;
324  } else if ((u8 *)p + 8 == (u8 *)q->stat) {
325  const __be64 *addr = (const __be64 *)q->desc;
326 
327  dma_unmap_page(dev, be64_to_cpu(addr[0]),
328  ntohl(p->len[0]), DMA_TO_DEVICE);
329  dma_unmap_page(dev, be64_to_cpu(addr[1]),
330  ntohl(p->len[1]), DMA_TO_DEVICE);
331  p = (const struct ulptx_sge_pair *)&addr[2];
332  } else {
333  const __be64 *addr = (const __be64 *)q->desc;
334 
335  dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
336  ntohl(p->len[0]), DMA_TO_DEVICE);
337  dma_unmap_page(dev, be64_to_cpu(addr[0]),
338  ntohl(p->len[1]), DMA_TO_DEVICE);
339  p = (const struct ulptx_sge_pair *)&addr[1];
340  }
341  }
342  if (nfrags) {
343  __be64 addr;
344 
345  if ((u8 *)p == (u8 *)q->stat)
346  p = (const struct ulptx_sge_pair *)q->desc;
347  addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
348  *(const __be64 *)q->desc;
349  dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
350  DMA_TO_DEVICE);
351  }
352 }
353 
364 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
365  unsigned int n, bool unmap)
366 {
367  struct tx_sw_desc *d;
368  unsigned int cidx = q->cidx;
369  struct device *dev = adap->pdev_dev;
370 
371  d = &q->sdesc[cidx];
372  while (n--) {
373  if (d->skb) { /* an SGL is present */
374  if (unmap)
375  unmap_sgl(dev, d->skb, d->sgl, q);
376  kfree_skb(d->skb);
377  d->skb = NULL;
378  }
379  ++d;
380  if (++cidx == q->size) {
381  cidx = 0;
382  d = q->sdesc;
383  }
384  }
385  q->cidx = cidx;
386 }
387 
388 /*
389  * Return the number of reclaimable descriptors in a Tx queue.
390  */
391 static inline int reclaimable(const struct sge_txq *q)
392 {
393  int hw_cidx = ntohs(q->stat->cidx);
394  hw_cidx -= q->cidx;
395  return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
396 }
397 
408 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
409  bool unmap)
410 {
411  int avail = reclaimable(q);
412 
413  if (avail) {
414  /*
415  * Limit the amount of clean up work we do at a time to keep
416  * the Tx lock hold time O(1).
417  */
418  if (avail > MAX_TX_RECLAIM)
419  avail = MAX_TX_RECLAIM;
420 
421  free_tx_desc(adap, q, avail, unmap);
422  q->in_use -= avail;
423  }
424 }
425 
426 static inline int get_buf_size(struct adapter *adapter,
427  const struct rx_sw_desc *d)
428 {
429  struct sge *s = &adapter->sge;
430  unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
431  int buf_size;
432 
433  switch (rx_buf_size_idx) {
434  case RX_SMALL_PG_BUF:
435  buf_size = PAGE_SIZE;
436  break;
437 
438  case RX_LARGE_PG_BUF:
439  buf_size = PAGE_SIZE << s->fl_pg_order;
440  break;
441 
442  case RX_SMALL_MTU_BUF:
443  buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
444  break;
445 
446  case RX_LARGE_MTU_BUF:
447  buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
448  break;
449 
450  default:
451  BUG_ON(1);
452  }
453 
454  return buf_size;
455 }
456 
466 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
467 {
468  while (n--) {
469  struct rx_sw_desc *d = &q->sdesc[q->cidx];
470 
471  if (is_buf_mapped(d))
472  dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
473  get_buf_size(adap, d),
474  PCI_DMA_FROMDEVICE);
475  put_page(d->page);
476  d->page = NULL;
477  if (++q->cidx == q->size)
478  q->cidx = 0;
479  q->avail--;
480  }
481 }
482 
494 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
495 {
496  struct rx_sw_desc *d = &q->sdesc[q->cidx];
497 
498  if (is_buf_mapped(d))
499  dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
500  get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
501  d->page = NULL;
502  if (++q->cidx == q->size)
503  q->cidx = 0;
504  q->avail--;
505 }
506 
507 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
508 {
509  if (q->pend_cred >= 8) {
510  wmb();
511  t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
512  QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
513  q->pend_cred &= 7;
514  }
515 }
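
/*
 * Credit arithmetic example (illustrative): the doorbell's PIDX field is in
 * units of 8 buffers, so with q->pend_cred == 27 the write above reports
 * PIDX(27 / 8) = 3 units (24 buffers) and the remaining 27 & 7 = 3 buffers
 * stay pending until another full unit accumulates.
 */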
516 
517 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
518  dma_addr_t mapping)
519 {
520  sd->page = pg;
521  sd->dma_addr = mapping; /* includes size low bits */
522 }
523 
538 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
539  gfp_t gfp)
540 {
541  struct sge *s = &adap->sge;
542  struct page *pg;
543  dma_addr_t mapping;
544  unsigned int cred = q->avail;
545  __be64 *d = &q->desc[q->pidx];
546  struct rx_sw_desc *sd = &q->sdesc[q->pidx];
547 
548  gfp |= __GFP_NOWARN | __GFP_COLD;
549 
550  if (s->fl_pg_order == 0)
551  goto alloc_small_pages;
552 
553  /*
554  * Prefer large buffers
555  */
556  while (n) {
557  pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
558  if (unlikely(!pg)) {
559  q->large_alloc_failed++;
560  break; /* fall back to single pages */
561  }
562 
563  mapping = dma_map_page(adap->pdev_dev, pg, 0,
564  PAGE_SIZE << s->fl_pg_order,
565  PCI_DMA_FROMDEVICE);
566  if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
567  __free_pages(pg, s->fl_pg_order);
568  goto out; /* do not try small pages for this error */
569  }
570  mapping |= RX_LARGE_PG_BUF;
571  *d++ = cpu_to_be64(mapping);
572 
573  set_rx_sw_desc(sd, pg, mapping);
574  sd++;
575 
576  q->avail++;
577  if (++q->pidx == q->size) {
578  q->pidx = 0;
579  sd = q->sdesc;
580  d = q->desc;
581  }
582  n--;
583  }
584 
585 alloc_small_pages:
586  while (n--) {
587  pg = __skb_alloc_page(gfp, NULL);
588  if (unlikely(!pg)) {
589  q->alloc_failed++;
590  break;
591  }
592 
593  mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
594  PCI_DMA_FROMDEVICE);
595  if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
596  put_page(pg);
597  goto out;
598  }
599  *d++ = cpu_to_be64(mapping);
600 
601  set_rx_sw_desc(sd, pg, mapping);
602  sd++;
603 
604  q->avail++;
605  if (++q->pidx == q->size) {
606  q->pidx = 0;
607  sd = q->sdesc;
608  d = q->desc;
609  }
610  }
611 
612 out: cred = q->avail - cred;
613  q->pend_cred += cred;
614  ring_fl_db(adap, q);
615 
616  if (unlikely(fl_starving(q))) {
617  smp_wmb();
618  set_bit(q->cntxt_id - adap->sge.egr_start,
619  adap->sge.starving_fl);
620  }
621 
622  return cred;
623 }
624 
625 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
626 {
627  refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
628  GFP_ATOMIC);
629 }
630 
650 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
651  size_t sw_size, dma_addr_t *phys, void *metadata,
652  size_t stat_size, int node)
653 {
654  size_t len = nelem * elem_size + stat_size;
655  void *s = NULL;
656  void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
657 
658  if (!p)
659  return NULL;
660  if (sw_size) {
661  s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
662 
663  if (!s) {
664  dma_free_coherent(dev, len, p, *phys);
665  return NULL;
666  }
667  }
668  if (metadata)
669  *(void **)metadata = s;
670  memset(p, 0, len);
671  return p;
672 }
673 
681 static inline unsigned int sgl_len(unsigned int n)
682 {
683  n--;
684  return (3 * n) / 2 + (n & 1) + 2;
685 }
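
/*
 * Worked example (illustrative): for n == 3 SGEs, one goes in the 2-flit
 * ulptx_sgl header (len0/addr0) and the remaining two fill one 3-flit
 * ulptx_sge_pair, giving (3 * 2) / 2 + (2 & 1) + 2 = 5 flits.
 */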
686 
694 static inline unsigned int flits_to_desc(unsigned int n)
695 {
696  BUG_ON(n > SGE_MAX_WR_LEN / 8);
697  return DIV_ROUND_UP(n, 8);
698 }
699 
707 static inline int is_eth_imm(const struct sk_buff *skb)
708 {
709  return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
710 }
711 
719 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
720 {
721  unsigned int flits;
722 
723  if (is_eth_imm(skb))
724  return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
725 
726  flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
727  if (skb_shinfo(skb)->gso_size)
728  flits += 2;
729  return flits;
730 }
731 
739 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
740 {
741  return flits_to_desc(calc_tx_flits(skb));
742 }
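
/*
 * Worked example (illustrative): a non-TSO packet with a linear part and two
 * page fragments needs sgl_len(3) + 4 = 9 flits; at 8 flits (64 bytes) per
 * Tx descriptor that is flits_to_desc(9) = 2 descriptors.  TSO adds 2 more
 * flits for the LSO header, 11 in total, which still fits in 2 descriptors.
 */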
743 
761 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
762  struct ulptx_sgl *sgl, u64 *end, unsigned int start,
763  const dma_addr_t *addr)
764 {
765  unsigned int i, len;
766  struct ulptx_sge_pair *to;
767  const struct skb_shared_info *si = skb_shinfo(skb);
768  unsigned int nfrags = si->nr_frags;
769  struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
770 
771  len = skb_headlen(skb) - start;
772  if (likely(len)) {
773  sgl->len0 = htonl(len);
774  sgl->addr0 = cpu_to_be64(addr[0] + start);
775  nfrags++;
776  } else {
777  sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
778  sgl->addr0 = cpu_to_be64(addr[1]);
779  }
780 
781  sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
782  if (likely(--nfrags == 0))
783  return;
784  /*
785  * Most of the complexity below deals with the possibility we hit the
786  * end of the queue in the middle of writing the SGL. For this case
787  * only we create the SGL in a temporary buffer and then copy it.
788  */
789  to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
790 
791  for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
792  to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
793  to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
794  to->addr[0] = cpu_to_be64(addr[i]);
795  to->addr[1] = cpu_to_be64(addr[++i]);
796  }
797  if (nfrags) {
798  to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
799  to->len[1] = cpu_to_be32(0);
800  to->addr[0] = cpu_to_be64(addr[i + 1]);
801  }
802  if (unlikely((u8 *)end > (u8 *)q->stat)) {
803  unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
804 
805  if (likely(part0))
806  memcpy(sgl->sge, buf, part0);
807  part1 = (u8 *)end - (u8 *)q->stat;
808  memcpy(q->desc, (u8 *)buf + part0, part1);
809  end = (void *)q->desc + part1;
810  }
811  if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
812  *end = 0;
813 }
814 
823 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
824 {
825  wmb(); /* write descriptors before telling HW */
826  spin_lock(&q->db_lock);
827  if (!q->db_disabled) {
828  t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
829  QID(q->cntxt_id) | PIDX(n));
830  }
831  q->db_pidx = q->pidx;
832  spin_unlock(&q->db_lock);
833 }
834 
846 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
847  void *pos)
848 {
849  u64 *p;
850  int left = (void *)q->stat - pos;
851 
852  if (likely(skb->len <= left)) {
853  if (likely(!skb->data_len))
854  skb_copy_from_linear_data(skb, pos, skb->len);
855  else
856  skb_copy_bits(skb, 0, pos, skb->len);
857  pos += skb->len;
858  } else {
859  skb_copy_bits(skb, 0, pos, left);
860  skb_copy_bits(skb, left, q->desc, skb->len - left);
861  pos = (void *)q->desc + (skb->len - left);
862  }
863 
864  /* 0-pad to multiple of 16 */
865  p = PTR_ALIGN(pos, 8);
866  if ((uintptr_t)p & 8)
867  *p = 0;
868 }
869 
870 /*
871  * Figure out what HW csum a packet wants and return the appropriate control
872  * bits.
873  */
874 static u64 hwcsum(const struct sk_buff *skb)
875 {
876  int csum_type;
877  const struct iphdr *iph = ip_hdr(skb);
878 
879  if (iph->version == 4) {
880  if (iph->protocol == IPPROTO_TCP)
881  csum_type = TX_CSUM_TCPIP;
882  else if (iph->protocol == IPPROTO_UDP)
883  csum_type = TX_CSUM_UDPIP;
884  else {
885 nocsum: /*
886  * unknown protocol, disable HW csum
887  * and hope a bad packet is detected
888  */
889  return TXPKT_L4CSUM_DIS;
890  }
891  } else {
892  /*
893  * this doesn't work with extension headers
894  */
895  const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
896 
897  if (ip6h->nexthdr == IPPROTO_TCP)
898  csum_type = TX_CSUM_TCPIP6;
899  else if (ip6h->nexthdr == IPPROTO_UDP)
900  csum_type = TX_CSUM_UDPIP6;
901  else
902  goto nocsum;
903  }
904 
905  if (likely(csum_type >= TX_CSUM_TCPIP))
906  return TXPKT_CSUM_TYPE(csum_type) |
907  TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
908  TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
909  else {
910  int start = skb_transport_offset(skb);
911 
912  return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
913  TXPKT_CSUM_LOC(start + skb->csum_offset);
914  }
915 }
916 
917 static void eth_txq_stop(struct sge_eth_txq *q)
918 {
919  netif_tx_stop_queue(q->txq);
920  q->q.stops++;
921 }
922 
923 static inline void txq_advance(struct sge_txq *q, unsigned int n)
924 {
925  q->in_use += n;
926  q->pidx += n;
927  if (q->pidx >= q->size)
928  q->pidx -= q->size;
929 }
930 
938 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
939 {
940  u32 wr_mid;
941  u64 cntrl, *end;
942  int qidx, credits;
943  unsigned int flits, ndesc;
944  struct adapter *adap;
945  struct sge_eth_txq *q;
946  const struct port_info *pi;
947  struct fw_eth_tx_pkt_wr *wr;
948  struct cpl_tx_pkt_core *cpl;
949  const struct skb_shared_info *ssi;
950  dma_addr_t addr[MAX_SKB_FRAGS + 1];
951 
952  /*
953  * The chip min packet length is 10 octets but play safe and reject
954  * anything shorter than an Ethernet header.
955  */
956  if (unlikely(skb->len < ETH_HLEN)) {
957 out_free: dev_kfree_skb(skb);
958  return NETDEV_TX_OK;
959  }
960 
961  pi = netdev_priv(dev);
962  adap = pi->adapter;
963  qidx = skb_get_queue_mapping(skb);
964  q = &adap->sge.ethtxq[qidx + pi->first_qset];
965 
966  reclaim_completed_tx(adap, &q->q, true);
967 
968  flits = calc_tx_flits(skb);
969  ndesc = flits_to_desc(flits);
970  credits = txq_avail(&q->q) - ndesc;
971 
972  if (unlikely(credits < 0)) {
973  eth_txq_stop(q);
974  dev_err(adap->pdev_dev,
975  "%s: Tx ring %u full while queue awake!\n",
976  dev->name, qidx);
977  return NETDEV_TX_BUSY;
978  }
979 
980  if (!is_eth_imm(skb) &&
981  unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
982  q->mapping_err++;
983  goto out_free;
984  }
985 
986  wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
987  if (unlikely(credits < ETHTXQ_STOP_THRES)) {
988  eth_txq_stop(q);
989  wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
990  }
991 
992  wr = (void *)&q->q.desc[q->q.pidx];
993  wr->equiq_to_len16 = htonl(wr_mid);
994  wr->r3 = cpu_to_be64(0);
995  end = (u64 *)wr + flits;
996 
997  ssi = skb_shinfo(skb);
998  if (ssi->gso_size) {
999  struct cpl_tx_pkt_lso *lso = (void *)wr;
1000  bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1001  int l3hdr_len = skb_network_header_len(skb);
1002  int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1003 
1004  wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1005  FW_WR_IMMDLEN(sizeof(*lso)));
1006  lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1007  LSO_FIRST_SLICE | LSO_LAST_SLICE |
1008  LSO_IPV6(v6) |
1009  LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1010  LSO_IPHDR_LEN(l3hdr_len / 4) |
1011  LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1012  lso->c.ipid_ofst = htons(0);
1013  lso->c.mss = htons(ssi->gso_size);
1014  lso->c.seqno_offset = htonl(0);
1015  lso->c.len = htonl(skb->len);
1016  cpl = (void *)(lso + 1);
1017  cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1018  TXPKT_IPHDR_LEN(l3hdr_len) |
1019  TXPKT_ETHHDR_LEN(eth_xtra_len);
1020  q->tso++;
1021  q->tx_cso += ssi->gso_segs;
1022  } else {
1023  int len;
1024 
1025  len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1026  wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1027  FW_WR_IMMDLEN(len));
1028  cpl = (void *)(wr + 1);
1029  if (skb->ip_summed == CHECKSUM_PARTIAL) {
1030  cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1031  q->tx_cso++;
1032  } else
1033  cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1034  }
1035 
1036  if (vlan_tx_tag_present(skb)) {
1037  q->vlan_ins++;
1038  cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
1039  }
1040 
1041  cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1042  TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1043  cpl->pack = htons(0);
1044  cpl->len = htons(skb->len);
1045  cpl->ctrl1 = cpu_to_be64(cntrl);
1046 
1047  if (is_eth_imm(skb)) {
1048  inline_tx_skb(skb, &q->q, cpl + 1);
1049  dev_kfree_skb(skb);
1050  } else {
1051  int last_desc;
1052 
1053  write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1054  addr);
1055  skb_orphan(skb);
1056 
1057  last_desc = q->q.pidx + ndesc - 1;
1058  if (last_desc >= q->q.size)
1059  last_desc -= q->q.size;
1060  q->q.sdesc[last_desc].skb = skb;
1061  q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1062  }
1063 
1064  txq_advance(&q->q, ndesc);
1065 
1066  ring_tx_db(adap, &q->q, ndesc);
1067  return NETDEV_TX_OK;
1068 }
1069 
1078 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1079 {
1080  int hw_cidx = ntohs(q->stat->cidx);
1081  int reclaim = hw_cidx - q->cidx;
1082 
1083  if (reclaim < 0)
1084  reclaim += q->size;
1085 
1086  q->in_use -= reclaim;
1087  q->cidx = hw_cidx;
1088 }
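
/*
 * Wrap-around example (illustrative): with q->size == 1024, a hardware cidx
 * of 2 and a software cidx of 1020 give 2 - 1020 = -1018, which the
 * correction above turns into -1018 + 1024 = 6 reclaimable descriptors.
 */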
1089 
1096 static inline int is_imm(const struct sk_buff *skb)
1097 {
1098  return skb->len <= MAX_CTRL_WR_LEN;
1099 }
1100 
1111 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1112 {
1113  reclaim_completed_tx_imm(&q->q);
1114  if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1115  wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1116  q->q.stops++;
1117  q->full = 1;
1118  }
1119 }
1120 
1129 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1130 {
1131  unsigned int ndesc;
1132  struct fw_wr_hdr *wr;
1133 
1134  if (unlikely(!is_imm(skb))) {
1135  WARN_ON(1);
1136  dev_kfree_skb(skb);
1137  return NET_XMIT_DROP;
1138  }
1139 
1140  ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1141  spin_lock(&q->sendq.lock);
1142 
1143  if (unlikely(q->full)) {
1144  skb->priority = ndesc; /* save for restart */
1145  __skb_queue_tail(&q->sendq, skb);
1146  spin_unlock(&q->sendq.lock);
1147  return NET_XMIT_CN;
1148  }
1149 
1150  wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1151  inline_tx_skb(skb, &q->q, wr);
1152 
1153  txq_advance(&q->q, ndesc);
1154  if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1155  ctrlq_check_stop(q, wr);
1156 
1157  ring_tx_db(q->adap, &q->q, ndesc);
1158  spin_unlock(&q->sendq.lock);
1159 
1160  kfree_skb(skb);
1161  return NET_XMIT_SUCCESS;
1162 }
1163 
1170 static void restart_ctrlq(unsigned long data)
1171 {
1172  struct sk_buff *skb;
1173  unsigned int written = 0;
1174  struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1175 
1176  spin_lock(&q->sendq.lock);
1177  reclaim_completed_tx_imm(&q->q);
1178  BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1179 
1180  while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1181  struct fw_wr_hdr *wr;
1182  unsigned int ndesc = skb->priority; /* previously saved */
1183 
1184  /*
1185  * Write descriptors and free skbs outside the lock to limit
1186  * wait times. q->full is still set so new skbs will be queued.
1187  */
1188  spin_unlock(&q->sendq.lock);
1189 
1190  wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1191  inline_tx_skb(skb, &q->q, wr);
1192  kfree_skb(skb);
1193 
1194  written += ndesc;
1195  txq_advance(&q->q, ndesc);
1196  if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1197  unsigned long old = q->q.stops;
1198 
1199  ctrlq_check_stop(q, wr);
1200  if (q->q.stops != old) { /* suspended anew */
1201  spin_lock(&q->sendq.lock);
1202  goto ringdb;
1203  }
1204  }
1205  if (written > 16) {
1206  ring_tx_db(q->adap, &q->q, written);
1207  written = 0;
1208  }
1209  spin_lock(&q->sendq.lock);
1210  }
1211  q->full = 0;
1212 ringdb: if (written)
1213  ring_tx_db(q->adap, &q->q, written);
1214  spin_unlock(&q->sendq.lock);
1215 }
1216 
1224 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1225 {
1226  int ret;
1227 
1228  local_bh_disable();
1229  ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1230  local_bh_enable();
1231  return ret;
1232 }
1233 
1241 static inline int is_ofld_imm(const struct sk_buff *skb)
1242 {
1243  return skb->len <= MAX_IMM_TX_PKT_LEN;
1244 }
1245 
1254 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1255 {
1256  unsigned int flits, cnt;
1257 
1258  if (is_ofld_imm(skb))
1259  return DIV_ROUND_UP(skb->len, 8);
1260 
1261  flits = skb_transport_offset(skb) / 8U; /* headers */
1262  cnt = skb_shinfo(skb)->nr_frags;
1263  if (skb->tail != skb->transport_header)
1264  cnt++;
1265  return flits + sgl_len(cnt);
1266 }
1267 
1277 static void txq_stop_maperr(struct sge_ofld_txq *q)
1278 {
1279  q->mapping_err++;
1280  q->q.stops++;
1281  set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1282  q->adap->sge.txq_maperr);
1283 }
1284 
1293 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1294 {
1295  struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1296 
1297  wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1298  q->q.stops++;
1299  q->full = 1;
1300 }
1301 
1309 static void service_ofldq(struct sge_ofld_txq *q)
1310 {
1311  u64 *pos;
1312  int credits;
1313  struct sk_buff *skb;
1314  unsigned int written = 0;
1315  unsigned int flits, ndesc;
1316 
1317  while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1318  /*
1319  * We drop the lock but leave skb on sendq, thus retaining
1320  * exclusive access to the state of the queue.
1321  */
1322  spin_unlock(&q->sendq.lock);
1323 
1324  reclaim_completed_tx(q->adap, &q->q, false);
1325 
1326  flits = skb->priority; /* previously saved */
1327  ndesc = flits_to_desc(flits);
1328  credits = txq_avail(&q->q) - ndesc;
1329  BUG_ON(credits < 0);
1330  if (unlikely(credits < TXQ_STOP_THRES))
1331  ofldtxq_stop(q, skb);
1332 
1333  pos = (u64 *)&q->q.desc[q->q.pidx];
1334  if (is_ofld_imm(skb))
1335  inline_tx_skb(skb, &q->q, pos);
1336  else if (map_skb(q->adap->pdev_dev, skb,
1337  (dma_addr_t *)skb->head)) {
1338  txq_stop_maperr(q);
1339  spin_lock(&q->sendq.lock);
1340  break;
1341  } else {
1342  int last_desc, hdr_len = skb_transport_offset(skb);
1343 
1344  memcpy(pos, skb->data, hdr_len);
1345  write_sgl(skb, &q->q, (void *)pos + hdr_len,
1346  pos + flits, hdr_len,
1347  (dma_addr_t *)skb->head);
1348 #ifdef CONFIG_NEED_DMA_MAP_STATE
1349  skb->dev = q->adap->port[0];
1350  skb->destructor = deferred_unmap_destructor;
1351 #endif
1352  last_desc = q->q.pidx + ndesc - 1;
1353  if (last_desc >= q->q.size)
1354  last_desc -= q->q.size;
1355  q->q.sdesc[last_desc].skb = skb;
1356  }
1357 
1358  txq_advance(&q->q, ndesc);
1359  written += ndesc;
1360  if (unlikely(written > 32)) {
1361  ring_tx_db(q->adap, &q->q, written);
1362  written = 0;
1363  }
1364 
1365  spin_lock(&q->sendq.lock);
1366  __skb_unlink(skb, &q->sendq);
1367  if (is_ofld_imm(skb))
1368  kfree_skb(skb);
1369  }
1370  if (likely(written))
1371  ring_tx_db(q->adap, &q->q, written);
1372 }
1373 
1381 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1382 {
1383  skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1384  spin_lock(&q->sendq.lock);
1385  __skb_queue_tail(&q->sendq, skb);
1386  if (q->sendq.qlen == 1)
1387  service_ofldq(q);
1388  spin_unlock(&q->sendq.lock);
1389  return NET_XMIT_SUCCESS;
1390 }
1391 
1398 static void restart_ofldq(unsigned long data)
1399 {
1400  struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1401 
1402  spin_lock(&q->sendq.lock);
1403  q->full = 0; /* the queue actually is completely empty now */
1404  service_ofldq(q);
1405  spin_unlock(&q->sendq.lock);
1406 }
1407 
1415 static inline unsigned int skb_txq(const struct sk_buff *skb)
1416 {
1417  return skb->queue_mapping >> 1;
1418 }
1419 
1427 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1428 {
1429  return skb->queue_mapping & 1;
1430 }
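
/*
 * The encode side of this mapping lives with the senders (see set_wr_txq()
 * in cxgb4.h); a minimal sketch, assuming the same bit layout:
 *
 *	skb_set_queue_mapping(skb, (queue << 1) | is_ctrl);  /* bit 0 = ctrl */
 */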
1431 
1432 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1433 {
1434  unsigned int idx = skb_txq(skb);
1435 
1436  if (unlikely(is_ctrl_pkt(skb)))
1437  return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1438  return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1439 }
1440 
1450 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1451 {
1452  int ret;
1453 
1454  local_bh_disable();
1455  ret = ofld_send(adap, skb);
1456  local_bh_enable();
1457  return ret;
1458 }
1459 
1468 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1469 {
1470  return t4_ofld_send(netdev2adap(dev), skb);
1471 }
1472 EXPORT_SYMBOL(cxgb4_ofld_send);
1473 
1474 static inline void copy_frags(struct sk_buff *skb,
1475  const struct pkt_gl *gl, unsigned int offset)
1476 {
1477  int i;
1478 
1479  /* usually there's just one frag */
1480  __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1481  gl->frags[0].offset + offset,
1482  gl->frags[0].size - offset);
1483  skb_shinfo(skb)->nr_frags = gl->nfrags;
1484  for (i = 1; i < gl->nfrags; i++)
1485  __skb_fill_page_desc(skb, i, gl->frags[i].page,
1486  gl->frags[i].offset,
1487  gl->frags[i].size);
1488 
1489  /* get a reference to the last page, we don't own it */
1490  get_page(gl->frags[gl->nfrags - 1].page);
1491 }
1492 
1502 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1503  unsigned int skb_len, unsigned int pull_len)
1504 {
1505  struct sk_buff *skb;
1506 
1507  /*
1508  * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1509  * size, which is expected since buffers are at least PAGE_SIZEd.
1510  * In this case packets up to RX_COPY_THRES have only one fragment.
1511  */
1512  if (gl->tot_len <= RX_COPY_THRES) {
1513  skb = dev_alloc_skb(gl->tot_len);
1514  if (unlikely(!skb))
1515  goto out;
1516  __skb_put(skb, gl->tot_len);
1517  skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1518  } else {
1519  skb = dev_alloc_skb(skb_len);
1520  if (unlikely(!skb))
1521  goto out;
1522  __skb_put(skb, pull_len);
1523  skb_copy_to_linear_data(skb, gl->va, pull_len);
1524 
1525  copy_frags(skb, gl, pull_len);
1526  skb->len = gl->tot_len;
1527  skb->data_len = skb->len - pull_len;
1528  skb->truesize += skb->data_len;
1529  }
1530 out: return skb;
1531 }
1532 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1533 
1541 static void t4_pktgl_free(const struct pkt_gl *gl)
1542 {
1543  int n;
1544  const struct page_frag *p;
1545 
1546  for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1547  put_page(p->page);
1548 }
1549 
1550 /*
1551  * Process an MPS trace packet. Give it an unused protocol number so it won't
1552  * be delivered to anyone and send it to the stack for capture.
1553  */
1554 static noinline int handle_trace_pkt(struct adapter *adap,
1555  const struct pkt_gl *gl)
1556 {
1557  struct sk_buff *skb;
1558  struct cpl_trace_pkt *p;
1559 
1560  skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1561  if (unlikely(!skb)) {
1562  t4_pktgl_free(gl);
1563  return 0;
1564  }
1565 
1566  p = (struct cpl_trace_pkt *)skb->data;
1567  __skb_pull(skb, sizeof(*p));
1568  skb_reset_mac_header(skb);
1569  skb->protocol = htons(0xffff);
1570  skb->dev = adap->port[0];
1571  netif_receive_skb(skb);
1572  return 0;
1573 }
1574 
1575 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1576  const struct cpl_rx_pkt *pkt)
1577 {
1578  struct adapter *adapter = rxq->rspq.adap;
1579  struct sge *s = &adapter->sge;
1580  int ret;
1581  struct sk_buff *skb;
1582 
1583  skb = napi_get_frags(&rxq->rspq.napi);
1584  if (unlikely(!skb)) {
1585  t4_pktgl_free(gl);
1586  rxq->stats.rx_drops++;
1587  return;
1588  }
1589 
1590  copy_frags(skb, gl, s->pktshift);
1591  skb->len = gl->tot_len - s->pktshift;
1592  skb->data_len = skb->len;
1593  skb->truesize += skb->data_len;
1594  skb->ip_summed = CHECKSUM_UNNECESSARY;
1595  skb_record_rx_queue(skb, rxq->rspq.idx);
1596  if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1597  skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1598 
1599  if (unlikely(pkt->vlan_ex)) {
1600  __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1601  rxq->stats.vlan_ex++;
1602  }
1603  ret = napi_gro_frags(&rxq->rspq.napi);
1604  if (ret == GRO_HELD)
1605  rxq->stats.lro_pkts++;
1606  else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1607  rxq->stats.lro_merged++;
1608  rxq->stats.pkts++;
1609  rxq->stats.rx_cso++;
1610 }
1611 
1620 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1621  const struct pkt_gl *si)
1622 {
1623  bool csum_ok;
1624  struct sk_buff *skb;
1625  const struct cpl_rx_pkt *pkt;
1626  struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1627  struct sge *s = &q->adap->sge;
1628 
1629  if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1630  return handle_trace_pkt(q->adap, si);
1631 
1632  pkt = (const struct cpl_rx_pkt *)rsp;
1633  csum_ok = pkt->csum_calc && !pkt->err_vec;
1634  if ((pkt->l2info & htonl(RXF_TCP)) &&
1635  (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1636  do_gro(rxq, si, pkt);
1637  return 0;
1638  }
1639 
1640  skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1641  if (unlikely(!skb)) {
1642  t4_pktgl_free(si);
1643  rxq->stats.rx_drops++;
1644  return 0;
1645  }
1646 
1647  __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
1648  skb->protocol = eth_type_trans(skb, q->netdev);
1649  skb_record_rx_queue(skb, q->idx);
1650  if (skb->dev->features & NETIF_F_RXHASH)
1651  skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1652 
1653  rxq->stats.pkts++;
1654 
1655  if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
1656  (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1657  if (!pkt->ip_frag) {
1658  skb->ip_summed = CHECKSUM_UNNECESSARY;
1659  rxq->stats.rx_cso++;
1660  } else if (pkt->l2info & htonl(RXF_IP)) {
1661  __sum16 c = (__force __sum16)pkt->csum;
1662  skb->csum = csum_unfold(c);
1663  skb->ip_summed = CHECKSUM_COMPLETE;
1664  rxq->stats.rx_cso++;
1665  }
1666  } else
1667  skb_checksum_none_assert(skb);
1668 
1669  if (unlikely(pkt->vlan_ex)) {
1670  __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1671  rxq->stats.vlan_ex++;
1672  }
1673  netif_receive_skb(skb);
1674  return 0;
1675 }
1676 
1692 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1693  int frags)
1694 {
1695  struct rx_sw_desc *d;
1696 
1697  while (frags--) {
1698  if (q->cidx == 0)
1699  q->cidx = q->size - 1;
1700  else
1701  q->cidx--;
1702  d = &q->sdesc[q->cidx];
1703  d->page = si->frags[frags].page;
1704  d->dma_addr |= RX_UNMAPPED_BUF;
1705  q->avail++;
1706  }
1707 }
1708 
1717 static inline bool is_new_response(const struct rsp_ctrl *r,
1718  const struct sge_rspq *q)
1719 {
1720  return RSPD_GEN(r->type_gen) == q->gen;
1721 }
1722 
1729 static inline void rspq_next(struct sge_rspq *q)
1730 {
1731  q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1732  if (unlikely(++q->cidx == q->size)) {
1733  q->cidx = 0;
1734  q->gen ^= 1;
1735  q->cur_desc = q->desc;
1736  }
1737 }
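
/*
 * Generation-bit example (illustrative): a queue starts with gen == 1 and
 * the hardware stamps each response it writes with the current generation.
 * When the driver consumes entry size - 1 and wraps cidx back to 0, gen
 * flips to 0, so entries still stamped 1 from the previous lap no longer
 * satisfy is_new_response() above.
 */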
1738 
1752 static int process_responses(struct sge_rspq *q, int budget)
1753 {
1754  int ret, rsp_type;
1755  int budget_left = budget;
1756  const struct rsp_ctrl *rc;
1757  struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1758  struct adapter *adapter = q->adap;
1759  struct sge *s = &adapter->sge;
1760 
1761  while (likely(budget_left)) {
1762  rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1763  if (!is_new_response(rc, q))
1764  break;
1765 
1766  rmb();
1767  rsp_type = RSPD_TYPE(rc->type_gen);
1768  if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1769  struct page_frag *fp;
1770  struct pkt_gl si;
1771  const struct rx_sw_desc *rsd;
1772  u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1773 
1774  if (len & RSPD_NEWBUF) {
1775  if (likely(q->offset > 0)) {
1776  free_rx_bufs(q->adap, &rxq->fl, 1);
1777  q->offset = 0;
1778  }
1779  len = RSPD_LEN(len);
1780  }
1781  si.tot_len = len;
1782 
1783  /* gather packet fragments */
1784  for (frags = 0, fp = si.frags; ; frags++, fp++) {
1785  rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1786  bufsz = get_buf_size(adapter, rsd);
1787  fp->page = rsd->page;
1788  fp->offset = q->offset;
1789  fp->size = min(bufsz, len);
1790  len -= fp->size;
1791  if (!len)
1792  break;
1793  unmap_rx_buf(q->adap, &rxq->fl);
1794  }
1795 
1796  /*
1797  * Last buffer remains mapped so explicitly make it
1798  * coherent for CPU access.
1799  */
1800  dma_sync_single_for_cpu(q->adap->pdev_dev,
1801  get_buf_addr(rsd),
1802  fp->size, DMA_FROM_DEVICE);
1803 
1804  si.va = page_address(si.frags[0].page) +
1805  si.frags[0].offset;
1806  prefetch(si.va);
1807 
1808  si.nfrags = frags + 1;
1809  ret = q->handler(q, q->cur_desc, &si);
1810  if (likely(ret == 0))
1811  q->offset += ALIGN(fp->size, s->fl_align);
1812  else
1813  restore_rx_bufs(&si, &rxq->fl, frags);
1814  } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1815  ret = q->handler(q, q->cur_desc, NULL);
1816  } else {
1817  ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1818  }
1819 
1820  if (unlikely(ret)) {
1821  /* couldn't process descriptor, back off for recovery */
1822  q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1823  break;
1824  }
1825 
1826  rspq_next(q);
1827  budget_left--;
1828  }
1829 
1830  if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1831  __refill_fl(q->adap, &rxq->fl);
1832  return budget - budget_left;
1833 }
1834 
1846 static int napi_rx_handler(struct napi_struct *napi, int budget)
1847 {
1848  unsigned int params;
1849  struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1850  int work_done = process_responses(q, budget);
1851 
1852  if (likely(work_done < budget)) {
1853  napi_complete(napi);
1854  params = q->next_intr_params;
1855  q->next_intr_params = q->intr_params;
1856  } else
1857  params = QINTR_TIMER_IDX(7);
1858 
1859  t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1860  INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1861  return work_done;
1862 }
1863 
1864 /*
1865  * The MSI-X interrupt handler for an SGE response queue.
1866  */
1867 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1868 {
1869  struct sge_rspq *q = cookie;
1870 
1871  napi_schedule(&q->napi);
1872  return IRQ_HANDLED;
1873 }
1874 
1875 /*
1876  * Process the indirect interrupt entries in the interrupt queue and kick off
1877  * NAPI for each queue that has generated an entry.
1878  */
1879 static unsigned int process_intrq(struct adapter *adap)
1880 {
1881  unsigned int credits;
1882  const struct rsp_ctrl *rc;
1883  struct sge_rspq *q = &adap->sge.intrq;
1884 
1885  spin_lock(&adap->sge.intrq_lock);
1886  for (credits = 0; ; credits++) {
1887  rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1888  if (!is_new_response(rc, q))
1889  break;
1890 
1891  rmb();
1892  if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1893  unsigned int qid = ntohl(rc->pldbuflen_qid);
1894 
1895  qid -= adap->sge.ingr_start;
1896  napi_schedule(&adap->sge.ingr_map[qid]->napi);
1897  }
1898 
1899  rspq_next(q);
1900  }
1901 
1902  t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1903  INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1904  spin_unlock(&adap->sge.intrq_lock);
1905  return credits;
1906 }
1907 
1908 /*
1909  * The MSI interrupt handler, which handles data events from SGE response queues
1910  * as well as error and other async events as they all use the same MSI vector.
1911  */
1912 static irqreturn_t t4_intr_msi(int irq, void *cookie)
1913 {
1914  struct adapter *adap = cookie;
1915 
1916  t4_slow_intr_handler(adap);
1917  process_intrq(adap);
1918  return IRQ_HANDLED;
1919 }
1920 
1921 /*
1922  * Interrupt handler for legacy INTx interrupts.
1923  * Handles data events from SGE response queues as well as error and other
1924  * async events as they all use the same interrupt line.
1925  */
1926 static irqreturn_t t4_intr_intx(int irq, void *cookie)
1927 {
1928  struct adapter *adap = cookie;
1929 
1930  t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1931  if (t4_slow_intr_handler(adap) | process_intrq(adap))
1932  return IRQ_HANDLED;
1933  return IRQ_NONE; /* probably shared interrupt */
1934 }
1935 
1943 irq_handler_t t4_intr_handler(struct adapter *adap)
1944 {
1945  if (adap->flags & USING_MSIX)
1946  return t4_sge_intr_msix;
1947  if (adap->flags & USING_MSI)
1948  return t4_intr_msi;
1949  return t4_intr_intx;
1950 }
1951 
1952 static void sge_rx_timer_cb(unsigned long data)
1953 {
1954  unsigned long m;
1955  unsigned int i, cnt[2];
1956  struct adapter *adap = (struct adapter *)data;
1957  struct sge *s = &adap->sge;
1958 
1959  for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
1960  for (m = s->starving_fl[i]; m; m &= m - 1) {
1961  struct sge_eth_rxq *rxq;
1962  unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1963  struct sge_fl *fl = s->egr_map[id];
1964 
1965  clear_bit(id, s->starving_fl);
1966  smp_mb__after_clear_bit();
1967 
1968  if (fl_starving(fl)) {
1969  rxq = container_of(fl, struct sge_eth_rxq, fl);
1970  if (napi_reschedule(&rxq->rspq.napi))
1971  fl->starving++;
1972  else
1973  set_bit(id, s->starving_fl);
1974  }
1975  }
1976 
1977  t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
1978  cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
1979  cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
1980 
1981  for (i = 0; i < 2; i++)
1982  if (cnt[i] >= s->starve_thres) {
1983  if (s->idma_state[i] || cnt[i] == 0xffffffff)
1984  continue;
1985  s->idma_state[i] = 1;
1986  t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
1987  m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
1988  dev_warn(adap->pdev_dev,
1989  "SGE idma%u starvation detected for "
1990  "queue %lu\n", i, m & 0xffff);
1991  } else if (s->idma_state[i])
1992  s->idma_state[i] = 0;
1993 
1994  mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1995 }
1996 
1997 static void sge_tx_timer_cb(unsigned long data)
1998 {
1999  unsigned long m;
2000  unsigned int i, budget;
2001  struct adapter *adap = (struct adapter *)data;
2002  struct sge *s = &adap->sge;
2003 
2004  for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
2005  for (m = s->txq_maperr[i]; m; m &= m - 1) {
2006  unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2007  struct sge_ofld_txq *txq = s->egr_map[id];
2008 
2009  clear_bit(id, s->txq_maperr);
2010  tasklet_schedule(&txq->qresume_tsk);
2011  }
2012 
2013  budget = MAX_TIMER_TX_RECLAIM;
2014  i = s->ethtxq_rover;
2015  do {
2016  struct sge_eth_txq *q = &s->ethtxq[i];
2017 
2018  if (q->q.in_use &&
2019  time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2020  __netif_tx_trylock(q->txq)) {
2021  int avail = reclaimable(&q->q);
2022 
2023  if (avail) {
2024  if (avail > budget)
2025  avail = budget;
2026 
2027  free_tx_desc(adap, &q->q, avail, true);
2028  q->q.in_use -= avail;
2029  budget -= avail;
2030  }
2031  __netif_tx_unlock(q->txq);
2032  }
2033 
2034  if (++i >= s->ethqsets)
2035  i = 0;
2036  } while (budget && i != s->ethtxq_rover);
2037  s->ethtxq_rover = i;
2038  mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2039 }
2040 
2041 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2042  struct net_device *dev, int intr_idx,
2043  struct sge_fl *fl, rspq_handler_t hnd)
2044 {
2045  int ret, flsz = 0;
2046  struct fw_iq_cmd c;
2047  struct sge *s = &adap->sge;
2048  struct port_info *pi = netdev_priv(dev);
2049 
2050  /* Size needs to be multiple of 16, including status entry. */
2051  iq->size = roundup(iq->size, 16);
2052 
2053  iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2054  &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2055  if (!iq->desc)
2056  return -ENOMEM;
2057 
2058  memset(&c, 0, sizeof(c));
2059  c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2060  FW_CMD_WRITE | FW_CMD_EXEC |
2061  FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
2062  c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
2063  FW_LEN16(c));
2064  c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2065  FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
2066  FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
2067  FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2068  -intr_idx - 1));
2069  c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2070  FW_IQ_CMD_IQGTSMODE |
2071  FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2072  FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2073  c.iqsize = htons(iq->size);
2074  c.iqaddr = cpu_to_be64(iq->phys_addr);
2075 
2076  if (fl) {
2077  fl->size = roundup(fl->size, 8);
2078  fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2079  sizeof(struct rx_sw_desc), &fl->addr,
2080  &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2081  if (!fl->desc)
2082  goto fl_nomem;
2083 
2084  flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2085  c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2086  FW_IQ_CMD_FL0FETCHRO(1) |
2087  FW_IQ_CMD_FL0DATARO(1) |
2088  FW_IQ_CMD_FL0PADEN);
2089  c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2090  FW_IQ_CMD_FL0FBMAX(3));
2091  c.fl0size = htons(flsz);
2092  c.fl0addr = cpu_to_be64(fl->addr);
2093  }
2094 
2095  ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2096  if (ret)
2097  goto err;
2098 
2099  netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2100  iq->cur_desc = iq->desc;
2101  iq->cidx = 0;
2102  iq->gen = 1;
2103  iq->next_intr_params = iq->intr_params;
2104  iq->cntxt_id = ntohs(c.iqid);
2105  iq->abs_id = ntohs(c.physiqid);
2106  iq->size--; /* subtract status entry */
2107  iq->adap = adap;
2108  iq->netdev = dev;
2109  iq->handler = hnd;
2110 
2111  /* set offset to -1 to distinguish ingress queues without FL */
2112  iq->offset = fl ? 0 : -1;
2113 
2114  adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2115 
2116  if (fl) {
2117  fl->cntxt_id = ntohs(c.fl0id);
2118  fl->avail = fl->pend_cred = 0;
2119  fl->pidx = fl->cidx = 0;
2120  fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2121  adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2122  refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2123  }
2124  return 0;
2125 
2126 fl_nomem:
2127  ret = -ENOMEM;
2128 err:
2129  if (iq->desc) {
2130  dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2131  iq->desc, iq->phys_addr);
2132  iq->desc = NULL;
2133  }
2134  if (fl && fl->desc) {
2135  kfree(fl->sdesc);
2136  fl->sdesc = NULL;
2137  dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2138  fl->desc, fl->addr);
2139  fl->desc = NULL;
2140  }
2141  return ret;
2142 }
2143 
2144 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2145 {
2146  q->in_use = 0;
2147  q->cidx = q->pidx = 0;
2148  q->stops = q->restarts = 0;
2149  q->stat = (void *)&q->desc[q->size];
2150  q->cntxt_id = id;
2151  spin_lock_init(&q->db_lock);
2152  adap->sge.egr_map[id - adap->sge.egr_start] = q;
2153 }
2154 
2155 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2156  struct net_device *dev, struct netdev_queue *netdevq,
2157  unsigned int iqid)
2158 {
2159  int ret, nentries;
2160  struct fw_eq_eth_cmd c;
2161  struct sge *s = &adap->sge;
2162  struct port_info *pi = netdev_priv(dev);
2163 
2164  /* Add status entries */
2165  nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2166 
2167  txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2168  sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2169  &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2170  netdev_queue_numa_node_read(netdevq));
2171  if (!txq->q.desc)
2172  return -ENOMEM;
2173 
2174  memset(&c, 0, sizeof(c));
2175  c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2176  FW_CMD_WRITE | FW_CMD_EXEC |
2177  FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2178  c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2179  FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2180  c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2181  c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2182  FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2183  FW_EQ_ETH_CMD_FETCHRO(1) |
2184  FW_EQ_ETH_CMD_IQID(iqid));
2185  c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2186  FW_EQ_ETH_CMD_FBMAX(3) |
2187  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2188  FW_EQ_ETH_CMD_EQSIZE(nentries));
2189  c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2190 
2191  ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2192  if (ret) {
2193  kfree(txq->q.sdesc);
2194  txq->q.sdesc = NULL;
2195  dma_free_coherent(adap->pdev_dev,
2196  nentries * sizeof(struct tx_desc),
2197  txq->q.desc, txq->q.phys_addr);
2198  txq->q.desc = NULL;
2199  return ret;
2200  }
2201 
2202  init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2203  txq->txq = netdevq;
2204  txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2205  txq->mapping_err = 0;
2206  return 0;
2207 }
2208 
2209 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2210  struct net_device *dev, unsigned int iqid,
2211  unsigned int cmplqid)
2212 {
2213  int ret, nentries;
2214  struct fw_eq_ctrl_cmd c;
2215  struct sge *s = &adap->sge;
2216  struct port_info *pi = netdev_priv(dev);
2217 
2218  /* Add status entries */
2219  nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2220 
2221  txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2222  sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2223  NULL, 0, NUMA_NO_NODE);
2224  if (!txq->q.desc)
2225  return -ENOMEM;
2226 
2227  c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2228  FW_CMD_WRITE | FW_CMD_EXEC |
2229  FW_EQ_CTRL_CMD_PFN(adap->fn) |
2230  FW_EQ_CTRL_CMD_VFN(0));
2231  c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2232  FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2233  c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2234  c.physeqid_pkd = htonl(0);
2235  c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2236  FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2237  FW_EQ_CTRL_CMD_FETCHRO |
2238  FW_EQ_CTRL_CMD_IQID(iqid));
2239  c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2240  FW_EQ_CTRL_CMD_FBMAX(3) |
2241  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2242  FW_EQ_CTRL_CMD_EQSIZE(nentries));
2243  c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2244 
2245  ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2246  if (ret) {
2247  dma_free_coherent(adap->pdev_dev,
2248  nentries * sizeof(struct tx_desc),
2249  txq->q.desc, txq->q.phys_addr);
2250  txq->q.desc = NULL;
2251  return ret;
2252  }
2253 
2254  init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2255  txq->adap = adap;
2256  skb_queue_head_init(&txq->sendq);
2257  tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2258  txq->full = 0;
2259  return 0;
2260 }
2261 
2262 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2263  struct net_device *dev, unsigned int iqid)
2264 {
2265  int ret, nentries;
2266  struct fw_eq_ofld_cmd c;
2267  struct sge *s = &adap->sge;
2268  struct port_info *pi = netdev_priv(dev);
2269 
2270  /* Add status entries */
2271  nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2272 
2273  txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2274  sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2275  &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2276  NUMA_NO_NODE);
2277  if (!txq->q.desc)
2278  return -ENOMEM;
2279 
2280  memset(&c, 0, sizeof(c));
2281  c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2282  FW_CMD_WRITE | FW_CMD_EXEC |
2283  FW_EQ_OFLD_CMD_PFN(adap->fn) |
2284  FW_EQ_OFLD_CMD_VFN(0));
2285  c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2286  FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2287  c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2288  FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2289  FW_EQ_OFLD_CMD_FETCHRO(1) |
2290  FW_EQ_OFLD_CMD_IQID(iqid));
2291  c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2292  FW_EQ_OFLD_CMD_FBMAX(3) |
2293  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2294  FW_EQ_OFLD_CMD_EQSIZE(nentries));
2295  c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2296 
2297  ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2298  if (ret) {
2299  kfree(txq->q.sdesc);
2300  txq->q.sdesc = NULL;
2301  dma_free_coherent(adap->pdev_dev,
2302  nentries * sizeof(struct tx_desc),
2303  txq->q.desc, txq->q.phys_addr);
2304  txq->q.desc = NULL;
2305  return ret;
2306  }
2307 
2308  init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2309  txq->adap = adap;
2310  skb_queue_head_init(&txq->sendq);
2311  tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2312  txq->full = 0;
2313  txq->mapping_err = 0;
2314  return 0;
2315 }
2316 
2317 static void free_txq(struct adapter *adap, struct sge_txq *q)
2318 {
2319  struct sge *s = &adap->sge;
2320 
2321  dma_free_coherent(adap->pdev_dev,
2322  q->size * sizeof(struct tx_desc) + s->stat_len,
2323  q->desc, q->phys_addr);
2324  q->cntxt_id = 0;
2325  q->sdesc = NULL;
2326  q->desc = NULL;
2327 }
2328 
2329 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2330  struct sge_fl *fl)
2331 {
2332  struct sge *s = &adap->sge;
2333  unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2334 
2335  adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2336  t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2337  rq->cntxt_id, fl_id, 0xffff);
2338  dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2339  rq->desc, rq->phys_addr);
2340  netif_napi_del(&rq->napi);
2341  rq->netdev = NULL;
2342  rq->cntxt_id = rq->abs_id = 0;
2343  rq->desc = NULL;
2344 
2345  if (fl) {
2346  free_rx_bufs(adap, fl, fl->avail);
2347  dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2348  fl->desc, fl->addr);
2349  kfree(fl->sdesc);
2350  fl->sdesc = NULL;
2351  fl->cntxt_id = 0;
2352  fl->desc = NULL;
2353  }
2354 }
2355 
2362 void t4_free_sge_resources(struct adapter *adap)
2363 {
2364  int i;
2365  struct sge_eth_rxq *eq = adap->sge.ethrxq;
2366  struct sge_eth_txq *etq = adap->sge.ethtxq;
2367  struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
2368 
2369  /* clean up Ethernet Tx/Rx queues */
2370  for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2371  if (eq->rspq.desc)
2372  free_rspq_fl(adap, &eq->rspq, &eq->fl);
2373  if (etq->q.desc) {
2374  t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2375  etq->q.cntxt_id);
2376  free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2377  kfree(etq->q.sdesc);
2378  free_txq(adap, &etq->q);
2379  }
2380  }
2381 
2382  /* clean up RDMA and iSCSI Rx queues */
2383  for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
2384  if (oq->rspq.desc)
2385  free_rspq_fl(adap, &oq->rspq, &oq->fl);
2386  }
2387  for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
2388  if (oq->rspq.desc)
2389  free_rspq_fl(adap, &oq->rspq, &oq->fl);
2390  }
2391 
2392  /* clean up offload Tx queues */
2393  for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2394  struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2395 
2396  if (q->q.desc) {
2397  tasklet_kill(&q->qresume_tsk);
2398  t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2399  q->q.cntxt_id);
2400  free_tx_desc(adap, &q->q, q->q.in_use, false);
2401  kfree(q->q.sdesc);
2402  __skb_queue_purge(&q->sendq);
2403  free_txq(adap, &q->q);
2404  }
2405  }
2406 
2407  /* clean up control Tx queues */
2408  for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2409  struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2410 
2411  if (cq->q.desc) {
2412  tasklet_kill(&cq->qresume_tsk);
2413  t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2414  cq->q.cntxt_id);
2415  __skb_queue_purge(&cq->sendq);
2416  free_txq(adap, &cq->q);
2417  }
2418  }
2419 
2420  if (adap->sge.fw_evtq.desc)
2421  free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2422 
2423  if (adap->sge.intrq.desc)
2424  free_rspq_fl(adap, &adap->sge.intrq, NULL);
2425 
2426  /* clear the reverse egress queue map */
2427  memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2428 }
2429 
2430 void t4_sge_start(struct adapter *adap)
2431 {
2432  adap->sge.ethtxq_rover = 0;
2433  mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2434  mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2435 }
2436 
2437 /**
2438  * t4_sge_stop - disable SGE operation
2439  * @adap: the adapter
2440  *
2441  * Stop tasklets and timers associated with the DMA engine.  Note that
2442  * this is effective only if measures have been taken to disable any HW
2443  * events that may restart them.
2444  */
2445 void t4_sge_stop(struct adapter *adap)
2446 {
2447  int i;
2448  struct sge *s = &adap->sge;
2449 
2450  if (in_interrupt()) /* actions below require waiting */
2451  return;
2452 
2453  if (s->rx_timer.function)
2454  del_timer_sync(&s->rx_timer);
2455  if (s->tx_timer.function)
2456  del_timer_sync(&s->tx_timer);
2457 
2458  for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2459  struct sge_ofld_txq *q = &s->ofldtxq[i];
2460 
2461  if (q->q.desc)
2462  tasklet_kill(&q->qresume_tsk);
2463  }
2464  for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2465  struct sge_ctrl_txq *cq = &s->ctrlq[i];
2466 
2467  if (cq->q.desc)
2468  tasklet_kill(&cq->qresume_tsk);
2469  }
2470 }
2471 
2472 /**
2473  * t4_sge_init - initialize SGE
2474  * @adap: the adapter
2475  *
2476  * Performs SGE initialization needed every time after a chip reset.
2477  * We do not initialize any of the queue sets here, instead the driver
2478  * top-level must request them individually.
2479  *
2480  * Called in two different modes:
2481  *
2482  *  1. Perform actual hardware initialization and record hard-coded
2483  *     parameters which were used.  This gets used when we're the
2484  *     Master PF and the Firmware Configuration File support didn't
2485  *     work for some reason.
2486  *
2487  *  2. We're not the Master PF or initialization was performed with
2488  *     a Firmware Configuration File.  In this case we need to grab
2489  *     any of the SGE operating parameters out of the hardware that
2490  *     we need to have in order to do our job and make sure we can
2491  *     live with them.
2492  */
2493 static int t4_sge_init_soft(struct adapter *adap)
2494 {
2495  struct sge *s = &adap->sge;
2496  u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2497  u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2498  u32 ingress_rx_threshold;
2499 
2500  /*
2501  * Verify that CPL messages are going to the Ingress Queue for
2502  * process_responses() and that only packet data is going to the
2503  * Free Lists.
2504  */
2505  if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
2506  RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
2507  dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2508  return -EINVAL;
2509  }
2510 
2511  /*
2512  * Validate the Host Buffer Register Array indices that we want to
2513  * use ...
2514  *
2515  * XXX Note that we should really read through the Host Buffer Size
2516  * XXX register array and find the indices of the Buffer Sizes which
2517  * XXX meet our needs!
2518  */
2519  #define READ_FL_BUF(x) \
2520  t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
2521 
2522  fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2523  fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2524  fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2525  fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2526 
2527  #undef READ_FL_BUF
2528 
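Concretely, each Host Buffer Size register is one 32-bit word, so READ_FL_BUF(x) reads SGE_FL_BUFFER_SIZE0 + 4*x. For instance, with RX_LARGE_MTU_BUF defined as index 3 earlier in this file, READ_FL_BUF(RX_LARGE_MTU_BUF) fetches the fourth entry of the Host Buffer Size array.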
2529  if (fl_small_pg != PAGE_SIZE ||
2530  (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
2531  (fl_large_pg & (fl_large_pg-1)) != 0))) {
2532  dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2533  fl_small_pg, fl_large_pg);
2534  return -EINVAL;
2535  }
2536  if (fl_large_pg)
2537  s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2538 
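As a worked example with illustrative values: on a 4 KB-page system (PAGE_SHIFT = 12), an fl_large_pg of 65536 passes the power-of-two check above (65536 & 65535 == 0) and yields s->fl_pg_order = ilog2(65536) - 12 = 16 - 12 = 4, i.e. large Free List buffers come from order-4 page allocations. A value such as 40960 would be rejected, since 40960 & 40959 != 0.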
2539  if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2540  fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2541  dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2542  fl_small_mtu, fl_large_mtu);
2543  return -EINVAL;
2544  }
2545 
2546  /*
2547  * Retrieve our RX interrupt holdoff timer values and counter
2548  * threshold values from the SGE parameters.
2549  */
2550  timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
2551  timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
2552  timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
2553  s->timer_val[0] = core_ticks_to_us(adap,
2554  TIMERVALUE0_GET(timer_value_0_and_1));
2555  s->timer_val[1] = core_ticks_to_us(adap,
2556  TIMERVALUE1_GET(timer_value_0_and_1));
2557  s->timer_val[2] = core_ticks_to_us(adap,
2558  TIMERVALUE2_GET(timer_value_2_and_3));
2559  s->timer_val[3] = core_ticks_to_us(adap,
2560  TIMERVALUE3_GET(timer_value_2_and_3));
2561  s->timer_val[4] = core_ticks_to_us(adap,
2562  TIMERVALUE4_GET(timer_value_4_and_5));
2563  s->timer_val[5] = core_ticks_to_us(adap,
2564  TIMERVALUE5_GET(timer_value_4_and_5));
2565 
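The conversion is plain rescaling: core_ticks_to_us() turns a raw TIMERVALUE field from core-clock ticks into microseconds. With a hypothetical 250 MHz core clock (250 ticks per microsecond), a TIMERVALUE of 12500 ticks corresponds to a 50 us interrupt holdoff.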
2566  ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
2567  s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
2568  s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
2569  s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
2570  s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
2571 
2572  return 0;
2573 }
2574 
2575 static int t4_sge_init_hard(struct adapter *adap)
2576 {
2577  struct sge *s = &adap->sge;
2578 
2579  /*
2580  * Set up our basic SGE mode to deliver CPL messages to our Ingress
2581  * Queue and Packet Data to the Free List.
2582  */
2583  t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2584  RXPKTCPLMODE_MASK);
2585 
2586  /*
2587  * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2588  * and generate an interrupt when this occurs so we can recover.
2589  */
2590  t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2591  V_HP_INT_THRESH(M_HP_INT_THRESH) |
2592  V_LP_INT_THRESH(M_LP_INT_THRESH),
2593  V_HP_INT_THRESH(dbfifo_int_thresh) |
2594  V_LP_INT_THRESH(dbfifo_int_thresh));
2595  t4_set_reg_field(adap, SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2596  F_ENABLE_DROP);
2597 
2598  /*
2599  * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2600  * t4_fixup_host_params().
2601  */
2602  s->fl_pg_order = FL_PG_ORDER;
2603  if (s->fl_pg_order)
2604  t4_write_reg(adap,
2605  SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2606  PAGE_SIZE << FL_PG_ORDER);
2607  t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2608  FL_MTU_SMALL_BUFSIZE(adap));
2609  t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2610  FL_MTU_LARGE_BUFSIZE(adap));
2611 
2612  /*
2613  * Note that the SGE Ingress Packet Count Interrupt Threshold and
2614  * Timer Holdoff values must be supplied by our caller.
2615  */
2616  t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2617  THRESHOLD_0(s->counter_val[0]) |
2618  THRESHOLD_1(s->counter_val[1]) |
2619  THRESHOLD_2(s->counter_val[2]) |
2620  THRESHOLD_3(s->counter_val[3]));
2621  t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2622  TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2623  TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2624  t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2625  TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2626  TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2627  t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2628  TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2629  TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2630 
2631  return 0;
2632 }
2633 
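Per the note inside t4_sge_init_hard(), the Ingress Packet Count Thresholds and Timer Holdoff values are written back from whatever the caller chose. A hypothetical caller might seed them as in the sketch below; the values mirror common cxgb4 defaults but are illustrative, not mandated by the hardware:

        /* Illustrative: seed interrupt holdoff parameters before hard init. */
        static void example_seed_sge_params(struct adapter *adap)
        {
                static const unsigned int timer_us[] = { 5, 10, 20, 50, 100, 200 };
                static const unsigned int count_thres[] = { 1, 8, 16, 32 };
                struct sge *s = &adap->sge;
                int i;

                for (i = 0; i < ARRAY_SIZE(timer_us); i++)
                        s->timer_val[i] = timer_us[i];
                for (i = 0; i < ARRAY_SIZE(count_thres); i++)
                        s->counter_val[i] = count_thres[i];
        }
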
2634 int t4_sge_init(struct adapter *adap)
2635 {
2636  struct sge *s = &adap->sge;
2637  u32 sge_control;
2638  int ret;
2639 
2640  /*
2641  * Ingress Padding Boundary and Egress Status Page Size are set up by
2642  * t4_fixup_host_params().
2643  */
2644  sge_control = t4_read_reg(adap, SGE_CONTROL);
2645  s->pktshift = PKTSHIFT_GET(sge_control);
2646  s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
2647  s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
2648  X_INGPADBOUNDARY_SHIFT);
2649 
2650  if (adap->flags & USING_SOFT_PARAMS)
2651  ret = t4_sge_init_soft(adap);
2652  else
2653  ret = t4_sge_init_hard(adap);
2654  if (ret < 0)
2655  return ret;
2656 
2657  /*
2658  * A FL with <= fl_starve_thres buffers is starving and a periodic
2659  * timer will attempt to refill it. This needs to be larger than the
2660  * SGE's Egress Congestion Threshold. If it isn't, then we can get
2661  * stuck waiting for new packets while the SGE is waiting for us to
2662  * give it more Free List entries. (Note that the SGE's Egress
2663  * Congestion Threshold is in units of 2 Free List pointers.)
2664  */
2665  s->fl_starve_thres
2666  = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
2667 
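Worked example (the register value is illustrative): if EGRTHRESHOLD_GET() returns 64, the SGE flags egress congestion at 64 * 2 = 128 Free List pointers, so fl_starve_thres becomes 2*64 + 1 = 129; a Free List holding 129 or fewer buffers is treated as starving and is refilled from the Rx timer before the SGE can stall waiting on it.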
2668  setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2669  setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2670  s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2671  s->idma_state[0] = s->idma_state[1] = 0;
2672  spin_lock_init(&s->intrq_lock);
2673 
2674  return 0;
2675 }