37 #include <linux/netdevice.h>
39 #include <linux/if_vlan.h>
44 #include <linux/prefetch.h>
49 #include "../cxgb4/t4_regs.h"
50 #include "../cxgb4/t4fw_api.h"
51 #include "../cxgb4/t4_msg.h"
231 static inline int need_skb_unmap(void)
233 #ifdef CONFIG_NEED_DMA_MAP_STATE
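/* need_skb_unmap() is non-zero only when CONFIG_NEED_DMA_MAP_STATE is defined,
 * i.e. when TX buffers carry DMA mapping state that must be torn down again;
 * on fully coherent platforms the unmapping work below compiles away. */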
246 static inline unsigned int txq_avail(const struct sge_txq *tq)
260 static inline unsigned int fl_cap(const struct sge_fl *fl)
273 static inline bool fl_starving(const struct sge_fl *fl)
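/* Queue-state helpers: txq_avail() returns how many TX descriptors are still
 * unused, fl_cap() is the usable capacity of a free list (the tail of the ring
 * is reserved for the hardware status entries), and fl_starving() reports
 * whether a free list has dropped below its low-water mark and needs to be
 * replenished. */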
296 si = skb_shinfo(skb);
298 for (fp = si->frags; fp < end; fp++) {
299 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
307 while (fp-- > si->frags)
315 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
319 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
321 if (likely(skb_headlen(skb)))
334 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
342 } else if ((u8 *)p == (u8 *)tq->stat) {
345 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
368 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
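/* unmap_sgl() walks the scatter/gather list that was written for a TX packet
 * and DMA-unmaps each fragment; the comparisons against tq->stat above handle
 * an SGL that wraps past the status block at the end of the descriptor ring
 * and continues at the start of the queue. */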
387 unsigned int n, bool unmap)
390 unsigned int cidx = tq->cidx;
393 const int need_unmap = need_skb_unmap() && unmap;
395 sdesc = &tq->sdesc[cidx];
403 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
409 if (++cidx == tq->size) {
420 static inline int reclaimable(const struct sge_txq *tq)
423 int reclaimable = hw_cidx - tq->cidx;
425 reclaimable += tq->size;
439 static inline void reclaim_completed_tx(struct adapter *adapter,
443 int avail = reclaimable(tq);
453 free_tx_desc(adapter, tq, avail, unmap);
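/* TX reclaim path: reclaimable() computes how many descriptors the hardware
 * has finished with beyond our cidx (adding tq->size when the index wrapped),
 * reclaim_completed_tx() hands a bounded batch of them to free_tx_desc(),
 * and free_tx_desc() unmaps and frees the associated skbs while advancing
 * cidx around the ring. */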
462 static inline int get_buf_size(const struct rx_sw_desc *sdesc)
479 static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
484 if (is_buf_mapped(sdesc))
508 static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
512 if (is_buf_mapped(sdesc))
529 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
562 #define POISON_BUF_VAL -1
564 static inline void poison_buf(struct page *page, size_t sz)
566 #if POISON_BUF_VAL >= 0
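/* poison_buf() fills newly allocated RX pages with POISON_BUF_VAL so that use
 * of stale buffer contents is easier to spot; defining the value as -1 (as
 * above) compiles the poisoning out via the "#if POISON_BUF_VAL >= 0" guard. */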
585 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
608 goto alloc_small_pages;
642 set_rx_sw_desc(sdesc, page, dma_addr);
671 set_rx_sw_desc(sdesc, page, dma_addr);
688 cred = fl->avail - cred;
690 ring_fl_db(adapter, fl);
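/* refill_fl() allocates pages, DMA-maps them and posts them to the free list,
 * falling back to single pages when larger allocations fail (the
 * "alloc_small_pages" path above); "cred = fl->avail - cred" works out how
 * many buffers were actually added so ring_fl_db() can credit them to the
 * hardware through the free-list doorbell. */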
704 static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
706 refill_fl(adapter, fl,
730 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
731 size_t swsize, dma_addr_t *busaddrp, void *swringp,
737 size_t hwlen = nelem * hwsize + stat_size;
749 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
755 *(void **)swringp = swring;
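/* alloc_ring() allocates a hardware descriptor ring (nelem * hwsize plus the
 * trailing status block) from coherent DMA memory and, when swsize is
 * non-zero, a parallel kcalloc()ed software-state array whose address is
 * handed back through swringp. */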
773 static inline unsigned int sgl_len(unsigned int n)
793 return (3 * n) / 2 + (n & 1) + 2;
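/* sgl_len(n) returns the number of 64-bit flits needed for a ulptx
 * scatter/gather list: the header carries the command plus the first
 * length/address pair in 2 flits, and each further pair of 32-bit length /
 * 64-bit address entries packs into 3 flits, which is where the
 * 3*n/2 + (n & 1) + 2 expression above comes from.  flits_to_desc() then
 * converts a flit count into whole TX descriptors. */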
803 static inline unsigned int flits_to_desc(unsigned int flits)
816 static inline int is_eth_imm(const struct sk_buff *skb)
835 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
857 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
858 if (skb_shinfo(skb)->gso_size)
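/* is_eth_imm() decides whether a packet is small enough to be copied directly
 * into the work request instead of being described by an SGL; calc_tx_flits()
 * sizes the work request accordingly: an SGL covering the skb head plus its
 * page fragments, with extra flits for the LSO control block when gso_size is
 * set. */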
885 static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
895 len = skb_headlen(skb) - start;
907 if (likely(--nfrags == 0))
916 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
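/* write_sgl() emits the ulptx SGL for a packet into the TX ring:
 * "len = skb_headlen(skb) - start" covers the linear data beyond the
 * caller-supplied start offset, and the loop at line 916 packs the remaining
 * page fragments two per length/address pair, spilling to the start of the
 * ring if the list would run past tq->stat. */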
948 static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
972 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
980 skb_copy_from_linear_data(skb, pos, skb->len);
987 pos = (void *)tq->desc + (skb->len - left);
1003 const struct iphdr *iph = ip_hdr(skb);
1005 if (iph->version == 4) {
1037 int start = skb_transport_offset(skb);
1050 netif_tx_stop_queue(txq->txq);
1057 static inline void txq_advance(struct sge_txq *tq, unsigned int n)
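/* Lines 1003-1037 appear to belong to the checksum-offload helper that parses
 * the IP and transport headers, line 1050 stops the netdev TX queue once the
 * ring runs out of room, and txq_advance() bumps the producer state (pidx and
 * in_use) by n descriptors, wrapping pidx at the ring size. */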
1077 unsigned int flits, ndesc;
1085 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1102 pi = netdev_priv(dev);
1104 qidx = skb_get_queue_mapping(skb);
1112 reclaim_completed_tx(adapter, &txq->q, true);
1119 flits = calc_tx_flits(skb);
1120 ndesc = flits_to_desc(flits);
1121 credits = txq_avail(&txq->q) - ndesc;
1132 "%s: TX ring %u full while queue awake!\n",
1137 if (!is_eth_imm(skb) &&
1170 wr = (void *)&txq->q.desc[txq->q.pidx];
1174 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1175 end = (u64 *)wr + flits;
1182 ssi = skb_shinfo(skb);
1186 int l3hdr_len = skb_network_header_len(skb);
1187 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1213 cpl = (void *)(lso + 1);
1222 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1231 cpl = (void *)(wr + 1);
1259 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1260 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1268 if (is_eth_imm(skb)) {
1273 inline_tx_skb(skb, &txq->q, cpl + 1);
1325 sgl = (void *)tq->desc;
1326 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1329 write_sgl(skb, tq, sgl, end, 0, addr);
1332 last_desc = tq->pidx + ndesc - 1;
1333 if (last_desc >= tq->size)
1334 last_desc -= tq->size;
1343 txq_advance(&txq->q, ndesc);
1345 ring_tx_db(adapter, &txq->q, ndesc);
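/* Lines 1077-1345 are the main transmit routine: it reclaims completed
 * descriptors, converts the skb into flits and descriptors, stops the queue
 * (logging "TX ring full while queue awake!") when credits run out, builds a
 * firmware Ethernet TX work request with an optional LSO header and the TX
 * packet CPL, then either copies a small packet inline or writes an SGL for
 * it, and finally advances pidx and rings the TX doorbell. */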
1366 static inline void copy_frags(struct sk_buff *skb,
1373 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1374 gl->frags[0].offset + offset,
1375 gl->frags[0].size - offset);
1376 skb_shinfo(skb)->nr_frags = gl->nfrags;
1377 for (i = 1; i < gl->nfrags; i++)
1378 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1379 gl->frags[i].offset,
1396 unsigned int skb_len, unsigned int pull_len)
1417 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1422 __skb_put(skb, pull_len);
1423 skb_copy_to_linear_data(skb, gl->va, pull_len);
1425 copy_frags(skb, gl, pull_len);
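/* copy_frags() attaches the pages of a packet gather list to an skb as page
 * fragments, with the first fragment offset by the bytes already consumed;
 * the surrounding lines (1396-1425) build the skb itself: small packets are
 * copied wholesale into the linear area, larger ones get pull_len bytes
 * copied and keep the rest as fragments. */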
1469 rxq->stats.rx_drops++;
1478 skb_record_rx_queue(skb, rxq->rspq.idx);
1485 rxq->stats.lro_pkts++;
1487 rxq->stats.lro_merged++;
1489 rxq->stats.rx_cso++;
1504 const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
1515 do_gro(rxq, gl, pkt);
1525 rxq->stats.rx_drops++;
1530 skb_record_rx_queue(skb, rspq->idx);
1539 skb->csum = csum_unfold(c);
1542 rxq->stats.rx_cso++;
1544 skb_checksum_none_assert(skb);
1547 rxq->stats.vlan_ex++;
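/* RX completion handling: do_gro() feeds large-receive candidates to the GRO
 * layer, and the handler starting around line 1504 builds an skb for ordinary
 * packets, records the RX queue, fills in the checksum state (or marks it
 * CHECKSUM_NONE), and bumps the per-queue rx_cso / lro / vlan_ex / rx_drops
 * counters seen above. */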
1564 static inline bool is_new_response(const struct rsp_ctrl *rc,
1590 static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1613 static inline void rspq_next(struct sge_rspq *rspq)
1639 int budget_left = budget;
1641 while (likely(budget_left)) {
1646 if (!is_new_response(rc, rspq))
1684 for (frag = 0, fp = gl.frags; ; frag++, fp++) {
1687 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1688 bufsz = get_buf_size(sdesc);
1705 get_buf_addr(sdesc),
1708 gl.frags[0].offset);
1719 restore_rx_bufs(&gl, &rxq->fl, frag);
1751 return budget - budget_left;
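/* process_responses() (ending at line 1751) drains up to "budget" entries
 * from a response queue: for packet responses it gathers the free-list
 * buffers into a pkt_gl, hands the gather list to the queue's handler, and
 * calls restore_rx_bufs() to put the buffers back on the free list if the
 * handler could not consume the packet; the return value is the number of
 * responses processed. */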
1767 unsigned int intr_params;
1771 if (likely(work_done < budget)) {
1797 napi_schedule(&rspq->napi);
1805 static unsigned int process_intrq(struct adapter *adapter)
1807 struct sge *s = &adapter->sge;
1808 struct sge_rspq *intrq = &s->intrq;
1809 unsigned int work_done;
1811 spin_lock(&adapter->sge.intrq_lock);
1812 for (work_done = 0; ; work_done++) {
1814 unsigned int qid, iq_idx;
1822 if (!is_new_response(rc, intrq))
1833 "Unexpected INTRQ response type %d\n",
1850 "Ingress QID %d out of range\n", qid);
1856 "Ingress QID %d RSPQ=NULL\n", qid);
1861 "Ingress QID %d refers to RSPQ %d\n",
1871 napi_schedule(&rspq->napi);
1880 spin_unlock(&adapter->sge.intrq_lock);
1891 struct adapter *adapter = cookie;
1893 process_intrq(adapter);
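/* Interrupt plumbing: the work_done < budget test at line 1771 is the NAPI
 * poll routine deciding whether to complete polling and pick new interrupt
 * holdoff parameters, the napi_schedule() at line 1797 looks like the
 * per-queue MSI-X handler kicking NAPI, process_intrq() walks the forwarded
 * interrupt queue under intrq_lock and schedules NAPI on each response queue
 * it references (after sanity-checking the reported QID), and the handler at
 * line 1891 is the non-MSI-X interrupt entry point that simply calls
 * process_intrq(). */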
1924 static void sge_rx_timer_cb(unsigned long data)
1926 struct adapter *adapter = (struct adapter *)data;
1927 struct sge *s = &adapter->sge;
1938 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
1941 for (m = s->starving_fl[i]; m; m &= m - 1) {
1954 if (fl_starving(fl)) {
1958 if (napi_reschedule(&rxq->rspq.napi))
1983 static void sge_tx_timer_cb(unsigned long data)
1985 struct adapter *adapter = (struct adapter *)data;
1986 struct sge *s = &adapter->sge;
1987 unsigned int i, budget;
1994 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
1995 int avail = reclaimable(&txq->q);
2000 free_tx_desc(adapter, &txq->q, avail, true);
2002 __netif_tx_unlock(txq->txq);
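/* sge_rx_timer_cb() periodically scans the starving_fl bitmap and, for each
 * free list that fl_starving() reports as depleted, reschedules the owning
 * queue's NAPI instance so the list gets refilled; sge_tx_timer_cb()
 * opportunistically reclaims completed TX descriptors (within a budget) from
 * any queue whose __netif_tx_trylock() succeeds, so TX resources are freed
 * even when no new transmits arrive. */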
2038 struct port_info *pi = netdev_priv(dev);
2040 int ret, iqandst, flsz = 0;
2051 intr_dest = adapter->sge.intrq.abs_id;
2074 memset(&cmd, 0, sizeof(cmd));
2143 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2160 rspq->offset = fl ? 0 : -1;
2171 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2186 if (fl && fl->desc) {
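/* Response queue / free list allocation (lines 2038-2186): the routine picks
 * the interrupt destination (the forwarded-interrupt queue's absolute ID when
 * not using MSI-X), issues an ingress-queue command to the firmware through
 * t4vf_wr_mbox(), initializes rspq->offset, pre-fills any free list via
 * refill_fl(), and on failure releases whatever ring memory (fl->desc etc.)
 * was already allocated. */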
2210 struct port_info *pi = netdev_priv(dev);
2222 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2236 memset(&cmd, 0, sizeof(cmd));
2260 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2267 txq->q.sdesc = NULL;
2269 nentries * sizeof(struct tx_desc),
2270 txq->q.desc, txq->q.phys_addr);
2278 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2287 txq->q.restarts = 0;
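/* Ethernet TX queue allocation (lines 2210-2287): the descriptor ring and its
 * software-state array come from alloc_ring(), the queue is created in the
 * firmware through t4vf_wr_mbox() (with the ring freed again if that fails),
 * and txq->q.stat is pointed at the status block just past the last
 * descriptor before the counters are zeroed. */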
2295 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2309 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2312 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2325 free_rx_bufs(adapter, fl, fl->avail);
2344 struct sge *s = &adapter->sge;
2348 struct sge_rspq *intrq = &s->intrq;
2351 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2353 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2356 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2358 free_txq(adapter, &txq->q);
2362 free_rspq_fl(adapter, evtq, NULL);
2364 free_rspq_fl(adapter, intrq, NULL);
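/* Teardown: free_txq() releases a TX ring's coherent memory, free_rspq_fl()
 * asks the firmware to free the ingress queue (and the free list identified
 * by flid, when present) before releasing the rings and any RX buffers still
 * on the list, and the loop over sge.ethqsets tears down every Queue Set
 * ahead of the firmware event queue and the forwarded-interrupt queue. */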
2375 adapter->sge.ethtxq_rover = 0;
2390 struct sge *s = &adapter->sge;
2412 struct sge *s = &adapter->sge;
2419 if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
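/* Final sanity check in the SGE init path (line 2419): the free-list buffer
 * sizes established by the Physical Function driver are vetted before use;
 * the first must equal PAGE_SIZE and a non-zero second size must be strictly
 * larger, otherwise initialization fails. */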