Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bnx2x_stats.c
Go to the documentation of this file.
1 /* bnx2x_stats.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <[email protected]>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include "bnx2x_stats.h"
21 #include "bnx2x_cmn.h"
22 
23 
24 /* Statistics */
25 
26 /*
27  * General service functions
28  */
29 
30 static inline long bnx2x_hilo(u32 *hiref)
31 {
32  u32 lo = *(hiref + 1);
33 #if (BITS_PER_LONG == 64)
34  u32 hi = *hiref;
35 
36  return HILO_U64(hi, lo);
37 #else
38  return lo;
39 #endif
40 }
41 
42 static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
43 {
44  u16 res = 0;
45 
46  /* 'newest' convention - shmem2 cotains the size of the port stats */
47  if (SHMEM2_HAS(bp, sizeof_port_stats)) {
48  u32 size = SHMEM2_RD(bp, sizeof_port_stats);
49  if (size)
50  res = size;
51 
52  /* prevent newer BC from causing buffer overflow */
53  if (res > sizeof(struct host_port_stats))
54  res = sizeof(struct host_port_stats);
55  }
56 
57  /* Older convention - all BCs support the port stats' fields up until
58  * the 'not_used' field
59  */
60  if (!res) {
61  res = offsetof(struct host_port_stats, not_used) + 4;
62 
63  /* if PFC stats are supported by the MFW, DMA them as well */
64  if (bp->flags & BC_SUPPORTS_PFC_STATS) {
65  res += offsetof(struct host_port_stats,
68  pfc_frames_tx_hi) + 4 ;
69  }
70  }
71 
72  res >>= 2;
73 
74  WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
75  return res;
76 }
77 
78 /*
79  * Init service functions
80  */
81 
82 /* Post the next statistics ramrod. Protect it with the spin in
83  * order to ensure the strict order between statistics ramrods
84  * (each ramrod has a sequence number passed in a
85  * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
86  * sent in order).
87  */
88 static void bnx2x_storm_stats_post(struct bnx2x *bp)
89 {
90  if (!bp->stats_pending) {
91  int rc;
92 
93  spin_lock_bh(&bp->stats_lock);
94 
95  if (bp->stats_pending) {
96  spin_unlock_bh(&bp->stats_lock);
97  return;
98  }
99 
100  bp->fw_stats_req->hdr.drv_stats_counter =
101  cpu_to_le16(bp->stats_counter++);
102 
103  DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
104  bp->fw_stats_req->hdr.drv_stats_counter);
105 
106 
107 
108  /* send FW stats ramrod */
113  if (rc == 0)
114  bp->stats_pending = 1;
115 
116  spin_unlock_bh(&bp->stats_lock);
117  }
118 }
119 
120 static void bnx2x_hw_stats_post(struct bnx2x *bp)
121 {
122  struct dmae_command *dmae = &bp->stats_dmae;
123  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
124 
125  *stats_comp = DMAE_COMP_VAL;
126  if (CHIP_REV_IS_SLOW(bp))
127  return;
128 
129  /* Update MCP's statistics if possible */
130  if (bp->func_stx)
131  memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
132  sizeof(bp->func_stats));
133 
134  /* loader */
135  if (bp->executer_idx) {
136  int loader_idx = PMF_DMAE_C(bp);
138  true, DMAE_COMP_GRC);
139  opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
140 
141  memset(dmae, 0, sizeof(struct dmae_command));
142  dmae->opcode = opcode;
143  dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
144  dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
145  dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
146  sizeof(struct dmae_command) *
147  (loader_idx + 1)) >> 2;
148  dmae->dst_addr_hi = 0;
149  dmae->len = sizeof(struct dmae_command) >> 2;
150  if (CHIP_IS_E1(bp))
151  dmae->len--;
152  dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
153  dmae->comp_addr_hi = 0;
154  dmae->comp_val = 1;
155 
156  *stats_comp = 0;
157  bnx2x_post_dmae(bp, dmae, loader_idx);
158 
159  } else if (bp->func_stx) {
160  *stats_comp = 0;
161  bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
162  }
163 }
164 
165 static int bnx2x_stats_comp(struct bnx2x *bp)
166 {
167  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
168  int cnt = 10;
169 
170  might_sleep();
171  while (*stats_comp != DMAE_COMP_VAL) {
172  if (!cnt) {
173  BNX2X_ERR("timeout waiting for stats finished\n");
174  break;
175  }
176  cnt--;
177  usleep_range(1000, 1000);
178  }
179  return 1;
180 }
181 
182 /*
183  * Statistics service functions
184  */
185 
186 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
187 {
188  struct dmae_command *dmae;
189  u32 opcode;
190  int loader_idx = PMF_DMAE_C(bp);
191  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
192 
193  /* sanity */
194  if (!bp->port.pmf || !bp->port.port_stx) {
195  BNX2X_ERR("BUG!\n");
196  return;
197  }
198 
199  bp->executer_idx = 0;
200 
201  opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
202 
203  dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
205  dmae->src_addr_lo = bp->port.port_stx >> 2;
206  dmae->src_addr_hi = 0;
209  dmae->len = DMAE_LEN32_RD_MAX;
210  dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
211  dmae->comp_addr_hi = 0;
212  dmae->comp_val = 1;
213 
214  dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
216  dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
217  dmae->src_addr_hi = 0;
219  DMAE_LEN32_RD_MAX * 4);
221  DMAE_LEN32_RD_MAX * 4);
222  dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
223 
224  dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
225  dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
226  dmae->comp_val = DMAE_COMP_VAL;
227 
228  *stats_comp = 0;
229  bnx2x_hw_stats_post(bp);
230  bnx2x_stats_comp(bp);
231 }
232 
/* Build the chain of DMAE commands (in bp->slowpath->dmae[]) that a PMF
 * executes on every statistics cycle: push the port/function stats to the
 * MCP scratchpads, then pull the MAC (EMAC / BMAC / MSTAT) and NIG
 * hardware counters into the slowpath buffers. Intermediate commands
 * complete to a GRC loader register; the final command completes to PCI
 * via stats_comp.
 *
 * NOTE(review): this extraction dropped many source lines (the opcode
 * initializers after the "MCP" and "MAC" comments, several src/dst
 * address assignments and register-name continuations), leaving orphan
 * fragments below. They are preserved verbatim rather than guessed at -
 * compare against the original tree before relying on this copy.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	/* NOTE(review): opcode initializer opener missing here */
		true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	/* NOTE(review): opcode initializer opener missing here */
		true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
		dmae->src_addr_hi = 0;
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
		dmae->src_addr_hi = 0;
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
		dmae->src_addr_hi = 0;
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
				rx_src_addr_lo = (mac_addr +
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			} else {
				tx_src_addr_lo = (mac_addr +
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
				rx_src_addr_lo = (mac_addr +
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
			rx_src_addr_lo = (mac_addr +
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats land right after the TX block in mac_stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* final command: completes to PCI, signalling stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
	dmae->src_addr_hi = 0;
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
454 
455 static void bnx2x_func_stats_init(struct bnx2x *bp)
456 {
457  struct dmae_command *dmae = &bp->stats_dmae;
458  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
459 
460  /* sanity */
461  if (!bp->func_stx) {
462  BNX2X_ERR("BUG!\n");
463  return;
464  }
465 
466  bp->executer_idx = 0;
467  memset(dmae, 0, sizeof(struct dmae_command));
468 
470  true, DMAE_COMP_PCI);
471  dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
472  dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
473  dmae->dst_addr_lo = bp->func_stx >> 2;
474  dmae->dst_addr_hi = 0;
475  dmae->len = sizeof(struct host_func_stats) >> 2;
476  dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
477  dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
478  dmae->comp_val = DMAE_COMP_VAL;
479 
480  *stats_comp = 0;
481 }
482 
483 static void bnx2x_stats_start(struct bnx2x *bp)
484 {
485  if (bp->port.pmf)
486  bnx2x_port_stats_init(bp);
487 
488  else if (bp->func_stx)
489  bnx2x_func_stats_init(bp);
490 
491  bnx2x_hw_stats_post(bp);
492  bnx2x_storm_stats_post(bp);
493 }
494 
/* Transition handler for a function that has just become the PMF:
 * wait for any in-flight DMAE batch, pull the current port statistics
 * from the MCP scratchpad, then (re)start the regular update cycle.
 * The call order is significant.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
501 
/* Restart the statistics flow: make sure the previously posted DMAE
 * batch has completed, then kick off a fresh update cycle.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
507 
/* Fold the freshly DMAed BigMAC (BMAC1 on E1x, BMAC2 otherwise) hardware
 * counters into the port stats shadow (pstats->mac_stx[1]) and derive the
 * pause/PFC totals exported through bp->eth_stats.
 *
 * The UPDATE_STAT64() macros expand against the local variables 'new',
 * 'pstats' and 'diff' by name, so those identifiers must not be renamed.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	/* export pause/PFC totals into the driver's ethtool stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
613 
/* Fold the freshly DMAed MSTAT (UMAC/XMAC on E3) hardware counters into
 * the port stats shadow (pstats->mac_stx[1]) and the driver's eth_stats.
 *
 * The ADD_STAT64() macros expand against the local variables 'new' and
 * 'pstats' by name, so those identifiers must not be renamed.
 *
 * NOTE(review): this extraction dropped the left-hand sides of the
 * trailing estats assignments / ADD_64() calls (only the pstats argument
 * lines survive below); they are preserved verbatim rather than guessed
 * at - compare against the original tree.
 */
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
		pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
		pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	/* NOTE(review): left-hand sides missing from here on */
		pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
		pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

		pstats->mac_stx[1].tx_stat_mac_2047_hi;
		pstats->mac_stx[1].tx_stat_mac_2047_lo;

		pstats->mac_stx[1].tx_stat_mac_4095_hi,
		pstats->mac_stx[1].tx_stat_mac_4095_lo);

		pstats->mac_stx[1].tx_stat_mac_9216_hi,
		pstats->mac_stx[1].tx_stat_mac_9216_lo);

		pstats->mac_stx[1].tx_stat_mac_16383_hi,
		pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
703 
/* Fold the freshly DMAed EMAC counters into the port stats shadow
 * (pstats->mac_stx[1]) and derive the ethtool pause-frame totals.
 *
 * The UPDATE_EXTEND_STAT() macros expand against the locals 'new' and
 * 'pstats' by name, so those identifiers must not be renamed.
 *
 * NOTE(review): the ADD_64() openers that accumulate the xoff pause
 * counters into estats are missing from this extraction; the surviving
 * argument lines are preserved verbatim - compare against the original
 * tree.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	/* NOTE(review): ADD_64() opener missing here */
		pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
		estats->pause_frames_received_lo,
		pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	/* NOTE(review): ADD_64() opener missing here */
		pstats->mac_stx[1].tx_stat_outxoffsent_hi,
		estats->pause_frames_sent_lo,
		pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
760 
761 static int bnx2x_hw_stats_update(struct bnx2x *bp)
762 {
763  struct nig_stats *new = bnx2x_sp(bp, nig_stats);
764  struct nig_stats *old = &(bp->port.old_nig_stats);
765  struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
766  struct bnx2x_eth_stats *estats = &bp->eth_stats;
767  struct {
768  u32 lo;
769  u32 hi;
770  } diff;
771 
772  switch (bp->link_vars.mac_type) {
773  case MAC_TYPE_BMAC:
774  bnx2x_bmac_stats_update(bp);
775  break;
776 
777  case MAC_TYPE_EMAC:
778  bnx2x_emac_stats_update(bp);
779  break;
780 
781  case MAC_TYPE_UMAC:
782  case MAC_TYPE_XMAC:
783  bnx2x_mstat_stats_update(bp);
784  break;
785 
786  case MAC_TYPE_NONE: /* unreached */
788  "stats updated by DMAE but no MAC active\n");
789  return -1;
790 
791  default: /* unreached */
792  BNX2X_ERR("Unknown MAC type\n");
793  }
794 
795  ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
796  new->brb_discard - old->brb_discard);
798  new->brb_truncate - old->brb_truncate);
799 
800  if (!CHIP_IS_E3(bp)) {
801  UPDATE_STAT64_NIG(egress_mac_pkt0,
802  etherstatspkts1024octetsto1522octets);
803  UPDATE_STAT64_NIG(egress_mac_pkt1,
804  etherstatspktsover1522octets);
805  }
806 
807  memcpy(old, new, sizeof(struct nig_stats));
808 
809  memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
810  sizeof(struct mac_stx));
811  estats->brb_drop_hi = pstats->brb_drop_hi;
812  estats->brb_drop_lo = pstats->brb_drop_lo;
813 
814  pstats->host_port_stats_counter++;
815 
816  if (CHIP_IS_E3(bp)) {
819  estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
820  }
821 
822  if (!BP_NOMCP(bp)) {
823  u32 nig_timer_max =
824  SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
825  if (nig_timer_max != estats->nig_timer_max) {
826  estats->nig_timer_max = nig_timer_max;
827  BNX2X_ERR("NIG timer max (%u)\n",
828  estats->nig_timer_max);
829  }
830  }
831 
832  return 0;
833 }
834 
/* Consume the statistics published by the storm processors for the last
 * ramrod sequence number and fold them into the per-queue, per-function
 * and global eth_stats. Returns -EAGAIN when any storm has not yet
 * echoed the expected sequence number (counters not ready).
 *
 * NOTE(review): this extraction is missing a number of source lines -
 * the DP() openers in the four validity checks, the right-hand sides /
 * ADD_64() openers in the byte-sum sections, and the PMF fw_stats_old
 * updates near the end. The surviving fragments are preserved verbatim
 * rather than guessed at - compare against the original tree.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
		&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
		&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		   "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		   "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		   "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		   "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		/* NOTE(review): right-hand sides / ADD_64() calls missing */
		qstats->total_bytes_received_hi =
		qstats->total_bytes_received_lo =

			qstats->total_bytes_received_lo,

			qstats->total_bytes_received_lo,

		qstats->valid_bytes_received_hi =
			qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
			qstats->total_bytes_received_lo;


		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
				    total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
				    total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
				    total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
				 total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* NOTE(review): UPDATE_QSTAT() openers missing */
			     total_broadcast_bytes_transmitted);
			     total_multicast_bytes_transmitted);
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		/* NOTE(review): the summation statements are missing */



		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* NOTE(review): ADD_64() openers missing in this tail section */
	estats->total_bytes_received_lo,
	estats->rx_stat_ifhcinbadoctets_lo);

		le32_to_cpu(tfunc->rcv_error_bytes.hi),
		estats->total_bytes_received_lo,
		le32_to_cpu(tfunc->rcv_error_bytes.lo));

		le32_to_cpu(tfunc->rcv_error_bytes.hi),
		estats->error_bytes_received_lo,
		le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	estats->error_bytes_received_lo,
	estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		/* NOTE(review): the fwstats update statements are missing */
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
1067 
1068 static void bnx2x_net_stats_update(struct bnx2x *bp)
1069 {
1070  struct bnx2x_eth_stats *estats = &bp->eth_stats;
1071  struct net_device_stats *nstats = &bp->dev->stats;
1072  unsigned long tmp;
1073  int i;
1074 
1075  nstats->rx_packets =
1076  bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
1077  bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
1078  bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
1079 
1080  nstats->tx_packets =
1081  bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
1082  bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
1083  bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
1084 
1085  nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
1086 
1087  nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1088 
1089  tmp = estats->mac_discard;
1090  for_each_rx_queue(bp, i) {
1091  struct tstorm_per_queue_stats *old_tclient =
1092  &bp->fp_stats[i].old_tclient;
1093  tmp += le32_to_cpu(old_tclient->checksum_discard);
1094  }
1095  nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1096 
1097  nstats->tx_dropped = 0;
1098 
1099  nstats->multicast =
1100  bnx2x_hilo(&estats->total_multicast_packets_received_hi);
1101 
1102  nstats->collisions =
1103  bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1104 
1105  nstats->rx_length_errors =
1106  bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1107  bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1108  nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1109  bnx2x_hilo(&estats->brb_truncate_hi);
1110  nstats->rx_crc_errors =
1111  bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1112  nstats->rx_frame_errors =
1113  bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1114  nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1115  nstats->rx_missed_errors = 0;
1116 
1117  nstats->rx_errors = nstats->rx_length_errors +
1118  nstats->rx_over_errors +
1119  nstats->rx_crc_errors +
1120  nstats->rx_frame_errors +
1121  nstats->rx_fifo_errors +
1122  nstats->rx_missed_errors;
1123 
1124  nstats->tx_aborted_errors =
1125  bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1126  bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1127  nstats->tx_carrier_errors =
1128  bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1129  nstats->tx_fifo_errors = 0;
1130  nstats->tx_heartbeat_errors = 0;
1131  nstats->tx_window_errors = 0;
1132 
1133  nstats->tx_errors = nstats->tx_aborted_errors +
1134  nstats->tx_carrier_errors +
1135  bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1136 }
1137 
/* Accumulate the driver-maintained (software) per-queue counters into the
 * global eth_stats.
 *
 * NOTE: the UPDATE_ESTAT_QSTAT macro appears to expand using the local
 * identifiers 'estats', 'qstats' and 'qstats_old' by name, so these
 * variable names must not be changed.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		/* software counters: xoff events, rx discards/alloc
		 * failures and hardware checksum errors
		 */
		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}
1154 
1155 static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
1156 {
1157  u32 val;
1158 
1159  if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
1160  val = SHMEM2_RD(bp, edebug_driver_if[1]);
1161 
1163  return true;
1164  }
1165 
1166  return false;
1167 }
1168 
1169 static void bnx2x_stats_update(struct bnx2x *bp)
1170 {
1171  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1172 
1173  if (bnx2x_edebug_stats_stopped(bp))
1174  return;
1175 
1176  if (*stats_comp != DMAE_COMP_VAL)
1177  return;
1178 
1179  if (bp->port.pmf)
1180  bnx2x_hw_stats_update(bp);
1181 
1182  if (bnx2x_storm_stats_update(bp)) {
1183  if (bp->stats_pending++ == 3) {
1184  BNX2X_ERR("storm stats were not updated for 3 times\n");
1185  bnx2x_panic();
1186  }
1187  return;
1188  }
1189 
1190  bnx2x_net_stats_update(bp);
1191  bnx2x_drv_stats_update(bp);
1192 
1193  if (netif_msg_timer(bp)) {
1194  struct bnx2x_eth_stats *estats = &bp->eth_stats;
1195 
1196  netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
1197  estats->brb_drop_lo, estats->brb_truncate_lo);
1198  }
1199 
1200  bnx2x_hw_stats_post(bp);
1201  bnx2x_storm_stats_post(bp);
1202 }
1203 
1204 static void bnx2x_port_stats_stop(struct bnx2x *bp)
1205 {
1206  struct dmae_command *dmae;
1207  u32 opcode;
1208  int loader_idx = PMF_DMAE_C(bp);
1209  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1210 
1211  bp->executer_idx = 0;
1212 
1213  opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
1214 
1215  if (bp->port.port_stx) {
1216 
1217  dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1218  if (bp->func_stx)
1220  opcode, DMAE_COMP_GRC);
1221  else
1223  opcode, DMAE_COMP_PCI);
1224 
1227  dmae->dst_addr_lo = bp->port.port_stx >> 2;
1228  dmae->dst_addr_hi = 0;
1229  dmae->len = bnx2x_get_port_stats_dma_len(bp);
1230  if (bp->func_stx) {
1231  dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
1232  dmae->comp_addr_hi = 0;
1233  dmae->comp_val = 1;
1234  } else {
1235  dmae->comp_addr_lo =
1236  U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1237  dmae->comp_addr_hi =
1238  U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1239  dmae->comp_val = DMAE_COMP_VAL;
1240 
1241  *stats_comp = 0;
1242  }
1243  }
1244 
1245  if (bp->func_stx) {
1246 
1247  dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1248  dmae->opcode =
1250  dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1251  dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1252  dmae->dst_addr_lo = bp->func_stx >> 2;
1253  dmae->dst_addr_hi = 0;
1254  dmae->len = sizeof(struct host_func_stats) >> 2;
1255  dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1256  dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1257  dmae->comp_val = DMAE_COMP_VAL;
1258 
1259  *stats_comp = 0;
1260  }
1261 }
1262 
1263 static void bnx2x_stats_stop(struct bnx2x *bp)
1264 {
1265  int update = 0;
1266 
1267  bnx2x_stats_comp(bp);
1268 
1269  if (bp->port.pmf)
1270  update = (bnx2x_hw_stats_update(bp) == 0);
1271 
1272  update |= (bnx2x_storm_stats_update(bp) == 0);
1273 
1274  if (update) {
1275  bnx2x_net_stats_update(bp);
1276 
1277  if (bp->port.pmf)
1278  bnx2x_port_stats_stop(bp);
1279 
1280  bnx2x_hw_stats_post(bp);
1281  bnx2x_stats_comp(bp);
1282  }
1283 }
1284 
/* No-op handler used by the statistics state machine for (state, event)
 * pairs that require no action.
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
1288 
1289 static const struct {
1290  void (*action)(struct bnx2x *bp);
1292 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1293 /* state event */
1294 {
1295 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
1296 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
1297 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
1298 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
1299 },
1300 {
1301 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
1302 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
1303 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
1304 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
1305 }
1306 };
1307 
1309 {
1310  enum bnx2x_stats_state state;
1311  if (unlikely(bp->panic))
1312  return;
1313 
1314  spin_lock_bh(&bp->stats_lock);
1315  state = bp->stats_state;
1316  bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1317  spin_unlock_bh(&bp->stats_lock);
1318 
1319  bnx2x_stats_stm[state][event].action(bp);
1320 
1321  if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1322  DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1323  state, event, bp->stats_state);
1324 }
1325 
1326 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1327 {
1328  struct dmae_command *dmae;
1329  u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1330 
1331  /* sanity */
1332  if (!bp->port.pmf || !bp->port.port_stx) {
1333  BNX2X_ERR("BUG!\n");
1334  return;
1335  }
1336 
1337  bp->executer_idx = 0;
1338 
1339  dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1341  true, DMAE_COMP_PCI);
1344  dmae->dst_addr_lo = bp->port.port_stx >> 2;
1345  dmae->dst_addr_hi = 0;
1346  dmae->len = bnx2x_get_port_stats_dma_len(bp);
1347  dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
1348  dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
1349  dmae->comp_val = DMAE_COMP_VAL;
1350 
1351  *stats_comp = 0;
1352  bnx2x_hw_stats_post(bp);
1353  bnx2x_stats_comp(bp);
1354 }
1355 
/* This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare for the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For FCoE query index is a DONT CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	/* one query entry per ethernet queue, each pointing at its own
	 * slot in the per_queue_stats array
	 */
	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}

	/* add FCoE queue query if needed */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->
					query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
1480 
1481 void bnx2x_stats_init(struct bnx2x *bp)
1482 {
1483  int /*abs*/port = BP_PORT(bp);
1484  int mb_idx = BP_FW_MB_IDX(bp);
1485  int i;
1486 
1487  bp->stats_pending = 0;
1488  bp->executer_idx = 0;
1489  bp->stats_counter = 0;
1490 
1491  /* port and func stats for management */
1492  if (!BP_NOMCP(bp)) {
1493  bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1494  bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1495 
1496  } else {
1497  bp->port.port_stx = 0;
1498  bp->func_stx = 0;
1499  }
1500  DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
1501  bp->port.port_stx, bp->func_stx);
1502 
1503  /* pmf should retrieve port statistics from SP on a non-init*/
1504  if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
1506 
1507  port = BP_PORT(bp);
1508  /* port stats */
1509  memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
1510  bp->port.old_nig_stats.brb_discard =
1511  REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1512  bp->port.old_nig_stats.brb_truncate =
1513  REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1514  if (!CHIP_IS_E3(bp)) {
1516  &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
1518  &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1519  }
1520 
1521  /* function stats */
1522  for_each_queue(bp, i) {
1523  struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1524 
1525  memset(&fp_stats->old_tclient, 0,
1526  sizeof(fp_stats->old_tclient));
1527  memset(&fp_stats->old_uclient, 0,
1528  sizeof(fp_stats->old_uclient));
1529  memset(&fp_stats->old_xclient, 0,
1530  sizeof(fp_stats->old_xclient));
1531  if (bp->stats_init) {
1532  memset(&fp_stats->eth_q_stats, 0,
1533  sizeof(fp_stats->eth_q_stats));
1534  memset(&fp_stats->eth_q_stats_old, 0,
1535  sizeof(fp_stats->eth_q_stats_old));
1536  }
1537  }
1538 
1539  /* Prepare statistics ramrod data */
1540  bnx2x_prep_fw_stats_req(bp);
1541 
1542  memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1543  if (bp->stats_init) {
1544  memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1545  memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1546  memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1547  memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1548  memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1549 
1550  /* Clean SP from previous statistics */
1551  if (bp->func_stx) {
1552  memset(bnx2x_sp(bp, func_stats), 0,
1553  sizeof(struct host_func_stats));
1554  bnx2x_func_stats_init(bp);
1555  bnx2x_hw_stats_post(bp);
1556  bnx2x_stats_comp(bp);
1557  }
1558  }
1559 
1561 
1562  if (bp->port.pmf && bp->port.port_stx)
1563  bnx2x_port_stats_base_init(bp);
1564 
1565  /* mark the end of statistics initializiation */
1566  bp->stats_init = false;
1567 }
1568 
1570 {
1571  int i;
1572  struct net_device_stats *nstats = &bp->dev->stats;
1573 
1574  /* save queue statistics */
1575  for_each_eth_queue(bp, i) {
1576  struct bnx2x_fastpath *fp = &bp->fp[i];
1577  struct bnx2x_eth_q_stats *qstats =
1578  &bnx2x_fp_stats(bp, fp)->eth_q_stats;
1579  struct bnx2x_eth_q_stats_old *qstats_old =
1580  &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
1581 
1596  }
1597 
1598  /* save net_device_stats statistics */
1599  bp->net_stats_old.rx_dropped = nstats->rx_dropped;
1600 
1601  /* store port firmware statistics */
1602  if (bp->port.pmf && IS_MF(bp)) {
1603  struct bnx2x_eth_stats *estats = &bp->eth_stats;
1604  struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
1609  }
1610 }
1611 
1612 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1613  u32 stats_type)
1614 {
1615  int i;
1616  struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1617  struct bnx2x_eth_stats *estats = &bp->eth_stats;
1618  struct per_queue_stats *fcoe_q_stats =
1619  &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1620 
1621  struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1622  &fcoe_q_stats->tstorm_queue_statistics;
1623 
1624  struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1625  &fcoe_q_stats->ustorm_queue_statistics;
1626 
1627  struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1628  &fcoe_q_stats->xstorm_queue_statistics;
1629 
1630  struct fcoe_statistics_params *fw_fcoe_stat =
1631  &bp->fw_stats_data->fcoe;
1632 
1633  memset(afex_stats, 0, sizeof(struct afex_stats));
1634 
1635  for_each_eth_queue(bp, i) {
1636  struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1637 
1638  ADD_64(afex_stats->rx_unicast_bytes_hi,
1640  afex_stats->rx_unicast_bytes_lo,
1642 
1643  ADD_64(afex_stats->rx_broadcast_bytes_hi,
1645  afex_stats->rx_broadcast_bytes_lo,
1647 
1648  ADD_64(afex_stats->rx_multicast_bytes_hi,
1650  afex_stats->rx_multicast_bytes_lo,
1652 
1653  ADD_64(afex_stats->rx_unicast_frames_hi,
1655  afex_stats->rx_unicast_frames_lo,
1657 
1658  ADD_64(afex_stats->rx_broadcast_frames_hi,
1660  afex_stats->rx_broadcast_frames_lo,
1662 
1663  ADD_64(afex_stats->rx_multicast_frames_hi,
1665  afex_stats->rx_multicast_frames_lo,
1667 
1668  /* sum to rx_frames_discarded all discraded
1669  * packets due to size, ttl0 and checksum
1670  */
1671  ADD_64(afex_stats->rx_frames_discarded_hi,
1673  afex_stats->rx_frames_discarded_lo,
1675 
1676  ADD_64(afex_stats->rx_frames_discarded_hi,
1678  afex_stats->rx_frames_discarded_lo,
1680 
1681  ADD_64(afex_stats->rx_frames_discarded_hi,
1683  afex_stats->rx_frames_discarded_lo,
1684  qstats->etherstatsoverrsizepkts_lo);
1685 
1686  ADD_64(afex_stats->rx_frames_dropped_hi,
1687  qstats->no_buff_discard_hi,
1688  afex_stats->rx_frames_dropped_lo,
1689  qstats->no_buff_discard_lo);
1690 
1691  ADD_64(afex_stats->tx_unicast_bytes_hi,
1693  afex_stats->tx_unicast_bytes_lo,
1695 
1696  ADD_64(afex_stats->tx_broadcast_bytes_hi,
1698  afex_stats->tx_broadcast_bytes_lo,
1700 
1701  ADD_64(afex_stats->tx_multicast_bytes_hi,
1703  afex_stats->tx_multicast_bytes_lo,
1705 
1706  ADD_64(afex_stats->tx_unicast_frames_hi,
1708  afex_stats->tx_unicast_frames_lo,
1710 
1711  ADD_64(afex_stats->tx_broadcast_frames_hi,
1713  afex_stats->tx_broadcast_frames_lo,
1715 
1716  ADD_64(afex_stats->tx_multicast_frames_hi,
1718  afex_stats->tx_multicast_frames_lo,
1720 
1721  ADD_64(afex_stats->tx_frames_dropped_hi,
1723  afex_stats->tx_frames_dropped_lo,
1725  }
1726 
1727  /* now add FCoE statistics which are collected separately
1728  * (both offloaded and non offloaded)
1729  */
1730  if (!NO_FCOE(bp)) {
1731  ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1732  LE32_0,
1733  afex_stats->rx_unicast_bytes_lo,
1734  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1735 
1736  ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1737  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1738  afex_stats->rx_unicast_bytes_lo,
1739  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1740 
1741  ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1742  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1743  afex_stats->rx_broadcast_bytes_lo,
1744  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1745 
1746  ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1747  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1748  afex_stats->rx_multicast_bytes_lo,
1749  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1750 
1751  ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1752  LE32_0,
1753  afex_stats->rx_unicast_frames_lo,
1754  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1755 
1756  ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1757  LE32_0,
1758  afex_stats->rx_unicast_frames_lo,
1759  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1760 
1761  ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1762  LE32_0,
1763  afex_stats->rx_broadcast_frames_lo,
1764  fcoe_q_tstorm_stats->rcv_bcast_pkts);
1765 
1766  ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1767  LE32_0,
1768  afex_stats->rx_multicast_frames_lo,
1769  fcoe_q_tstorm_stats->rcv_ucast_pkts);
1770 
1771  ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1772  LE32_0,
1773  afex_stats->rx_frames_discarded_lo,
1774  fcoe_q_tstorm_stats->checksum_discard);
1775 
1776  ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1777  LE32_0,
1778  afex_stats->rx_frames_discarded_lo,
1779  fcoe_q_tstorm_stats->pkts_too_big_discard);
1780 
1781  ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1782  LE32_0,
1783  afex_stats->rx_frames_discarded_lo,
1784  fcoe_q_tstorm_stats->ttl0_discard);
1785 
1786  ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1787  LE16_0,
1788  afex_stats->rx_frames_dropped_lo,
1789  fcoe_q_tstorm_stats->no_buff_discard);
1790 
1791  ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1792  LE32_0,
1793  afex_stats->rx_frames_dropped_lo,
1794  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1795 
1796  ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1797  LE32_0,
1798  afex_stats->rx_frames_dropped_lo,
1799  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1800 
1801  ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1802  LE32_0,
1803  afex_stats->rx_frames_dropped_lo,
1804  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1805 
1806  ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1807  LE32_0,
1808  afex_stats->rx_frames_dropped_lo,
1809  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1810 
1811  ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1812  LE32_0,
1813  afex_stats->rx_frames_dropped_lo,
1814  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1815 
1816  ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1817  LE32_0,
1818  afex_stats->tx_unicast_bytes_lo,
1819  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1820 
1821  ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1822  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1823  afex_stats->tx_unicast_bytes_lo,
1824  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1825 
1826  ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1827  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1828  afex_stats->tx_broadcast_bytes_lo,
1829  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1830 
1831  ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1832  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1833  afex_stats->tx_multicast_bytes_lo,
1834  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1835 
1836  ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1837  LE32_0,
1838  afex_stats->tx_unicast_frames_lo,
1839  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1840 
1841  ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1842  LE32_0,
1843  afex_stats->tx_unicast_frames_lo,
1844  fcoe_q_xstorm_stats->ucast_pkts_sent);
1845 
1846  ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1847  LE32_0,
1848  afex_stats->tx_broadcast_frames_lo,
1849  fcoe_q_xstorm_stats->bcast_pkts_sent);
1850 
1851  ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1852  LE32_0,
1853  afex_stats->tx_multicast_frames_lo,
1854  fcoe_q_xstorm_stats->mcast_pkts_sent);
1855 
1856  ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1857  LE32_0,
1858  afex_stats->tx_frames_dropped_lo,
1859  fcoe_q_xstorm_stats->error_drop_pkts);
1860  }
1861 
1862  /* if port stats are requested, add them to the PMF
1863  * stats, as anyway they will be accumulated by the
1864  * MCP before sent to the switch
1865  */
1866  if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1867  ADD_64(afex_stats->rx_frames_dropped_hi,
1868  0,
1869  afex_stats->rx_frames_dropped_lo,
1870  estats->mac_filter_discard);
1871  ADD_64(afex_stats->rx_frames_dropped_hi,
1872  0,
1873  afex_stats->rx_frames_dropped_lo,
1874  estats->brb_truncate_discard);
1875  ADD_64(afex_stats->rx_frames_discarded_hi,
1876  0,
1877  afex_stats->rx_frames_discarded_lo,
1878  estats->mac_discard);
1879  }
1880 }