Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bnx2x_link.c
Go to the documentation of this file.
1 /* Copyright 2008-2012 Broadcom Corporation
2  *
3  * Unless you and Broadcom execute a separate written software license
4  * agreement governing use of this software, this software is licensed to you
5  * under the terms of the GNU General Public License version 2, available
6  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
7  *
8  * Notwithstanding the above, under no circumstances may you combine this
9  * software in any way with any other Broadcom software provided under a
10  * license other than the GPL, without Broadcom's express prior written
11  * consent.
12  *
13  * Written by Yaniv Rosner
14  *
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/delay.h>
24 #include <linux/ethtool.h>
25 #include <linux/mutex.h>
26 
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 
30 /********************************************************/
31 #define ETH_HLEN 14
32 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
33 #define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
34 #define ETH_MIN_PACKET_SIZE 60
35 #define ETH_MAX_PACKET_SIZE 1500
36 #define ETH_MAX_JUMBO_PACKET_SIZE 9600
37 #define MDIO_ACCESS_TIMEOUT 1000
38 #define WC_LANE_MAX 4
39 #define I2C_SWITCH_WIDTH 2
40 #define I2C_BSC0 0
41 #define I2C_BSC1 1
42 #define I2C_WA_RETRY_CNT 3
43 #define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
44 #define MCPR_IMC_COMMAND_READ_OP 1
45 #define MCPR_IMC_COMMAND_WRITE_OP 2
46 
47 /* LED Blink rate that will achieve ~15.9Hz */
48 #define LED_BLINK_RATE_VAL_E3 354
49 #define LED_BLINK_RATE_VAL_E1X_E2 480
50 /***********************************************************/
51 /* Shortcut definitions */
52 /***********************************************************/
53 
54 #define NIG_LATCH_BC_ENABLE_MI_INT 0
55 
56 #define NIG_STATUS_EMAC0_MI_INT \
57  NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
58 #define NIG_STATUS_XGXS0_LINK10G \
59  NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
60 #define NIG_STATUS_XGXS0_LINK_STATUS \
61  NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
62 #define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
63  NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
64 #define NIG_STATUS_SERDES0_LINK_STATUS \
65  NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
66 #define NIG_MASK_MI_INT \
67  NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
68 #define NIG_MASK_XGXS0_LINK10G \
69  NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
70 #define NIG_MASK_XGXS0_LINK_STATUS \
71  NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
72 #define NIG_MASK_SERDES0_LINK_STATUS \
73  NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
74 
75 #define MDIO_AN_CL73_OR_37_COMPLETE \
76  (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
77  MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
78 
79 #define XGXS_RESET_BITS \
80  (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
81  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
82  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
83  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
84  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
85 
86 #define SERDES_RESET_BITS \
87  (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
88  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
89  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
90  MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
91 
92 #define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
93 #define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
94 #define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
95 #define AUTONEG_PARALLEL \
96  SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
97 #define AUTONEG_SGMII_FIBER_AUTODET \
98  SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
99 #define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
100 
101 #define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
102  MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
103 #define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
104  MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
105 #define GP_STATUS_SPEED_MASK \
106  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
107 #define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
108 #define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
109 #define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
110 #define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
111 #define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
112 #define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
113 #define GP_STATUS_10G_HIG \
114  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
115 #define GP_STATUS_10G_CX4 \
116  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
117 #define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
118 #define GP_STATUS_10G_KX4 \
119  MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
120 #define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
121 #define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
122 #define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
123 #define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
124 #define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
125 #define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
126 #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
127 #define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
128 #define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
129 #define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
130 #define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
131 #define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
132 #define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
133 #define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
134 #define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
135 #define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
136 #define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
137 #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
138 #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
139 
140 #define LINK_UPDATE_MASK \
141  (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
142  LINK_STATUS_LINK_UP | \
143  LINK_STATUS_PHYSICAL_LINK_FLAG | \
144  LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
145  LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
146  LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
147  LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
148  LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
149  LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
150 
151 #define SFP_EEPROM_CON_TYPE_ADDR 0x2
152  #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
153  #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
154 
155 
156 #define SFP_EEPROM_COMP_CODE_ADDR 0x3
157  #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
158  #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
159  #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
160 
161 #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
162  #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
163  #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
164 
165 #define SFP_EEPROM_OPTIONS_ADDR 0x40
166  #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
167 #define SFP_EEPROM_OPTIONS_SIZE 2
168 
169 #define EDC_MODE_LINEAR 0x0022
170 #define EDC_MODE_LIMITING 0x0044
171 #define EDC_MODE_PASSIVE_DAC 0x0055
172 
173 /* ETS defines*/
174 #define DCBX_INVALID_COS (0xFF)
175 
176 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
177 #define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
178 #define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
179 #define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
180 #define ETS_E3B0_PBF_MIN_W_VAL (10000)
181 
182 #define MAX_PACKET_SIZE (9700)
183 #define MAX_KR_LINK_RETRY 4
184 
185 /**********************************************************/
186 /* INTERFACE */
187 /**********************************************************/
188 
/* Perform a clause-22 register access tunnelled over a clause-45 MDIO
 * transaction: the bank selects the high address bits, the low nibble of
 * the clause-22 address is the offset within the bank.
 */
#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
	bnx2x_cl45_write(_bp, _phy, \
		(_phy)->def_md_devad, \
		(_bank + (_addr & 0xf)), \
		_val)

#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
	bnx2x_cl45_read(_bp, _phy, \
		(_phy)->def_md_devad, \
		(_bank + (_addr & 0xf)), \
		_val)
200 
201 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
202 {
203  u32 val = REG_RD(bp, reg);
204 
205  val |= bits;
206  REG_WR(bp, reg, val);
207  return val;
208 }
209 
210 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
211 {
212  u32 val = REG_RD(bp, reg);
213 
214  val &= ~bits;
215  REG_WR(bp, reg, val);
216  return val;
217 }
218 
219 /*
220  * bnx2x_check_lfa - This function checks if link reinitialization is required,
221  * or link flap can be avoided.
222  *
223  * @params: link parameters
224  * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed
225  * condition code.
226  */
227 static int bnx2x_check_lfa(struct link_params *params)
228 {
229  u32 link_status, cfg_idx, lfa_mask, cfg_size;
230  u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
231  u32 saved_val, req_val, eee_status;
232  struct bnx2x *bp = params->bp;
233 
234  additional_config =
235  REG_RD(bp, params->lfa_base +
236  offsetof(struct shmem_lfa, additional_config));
237 
238  /* NOTE: must be first condition checked -
239  * to verify DCC bit is cleared in any case!
240  */
241  if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
242  DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
243  REG_WR(bp, params->lfa_base +
244  offsetof(struct shmem_lfa, additional_config),
245  additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
246  return LFA_DCC_LFA_DISABLED;
247  }
248 
249  /* Verify that link is up */
250  link_status = REG_RD(bp, params->shmem_base +
251  offsetof(struct shmem_region,
252  port_mb[params->port].link_status));
253  if (!(link_status & LINK_STATUS_LINK_UP))
254  return LFA_LINK_DOWN;
255 
256  /* Verify that loopback mode is not set */
257  if (params->loopback_mode)
258  return LFA_LOOPBACK_ENABLED;
259 
260  /* Verify that MFW supports LFA */
261  if (!params->lfa_base)
262  return LFA_MFW_IS_TOO_OLD;
263 
264  if (params->num_phys == 3) {
265  cfg_size = 2;
266  lfa_mask = 0xffffffff;
267  } else {
268  cfg_size = 1;
269  lfa_mask = 0xffff;
270  }
271 
272  /* Compare Duplex */
273  saved_val = REG_RD(bp, params->lfa_base +
274  offsetof(struct shmem_lfa, req_duplex));
275  req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
276  if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
277  DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
278  (saved_val & lfa_mask), (req_val & lfa_mask));
279  return LFA_DUPLEX_MISMATCH;
280  }
281  /* Compare Flow Control */
282  saved_val = REG_RD(bp, params->lfa_base +
283  offsetof(struct shmem_lfa, req_flow_ctrl));
284  req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
285  if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
286  DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
287  (saved_val & lfa_mask), (req_val & lfa_mask));
288  return LFA_FLOW_CTRL_MISMATCH;
289  }
290  /* Compare Link Speed */
291  saved_val = REG_RD(bp, params->lfa_base +
292  offsetof(struct shmem_lfa, req_line_speed));
293  req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
294  if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
295  DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
296  (saved_val & lfa_mask), (req_val & lfa_mask));
298  }
299 
300  for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
301  cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
302  offsetof(struct shmem_lfa,
303  speed_cap_mask[cfg_idx]));
304 
305  if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
306  DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
307  cur_speed_cap_mask,
308  params->speed_cap_mask[cfg_idx]);
309  return LFA_SPEED_CAP_MISMATCH;
310  }
311  }
312 
313  cur_req_fc_auto_adv =
314  REG_RD(bp, params->lfa_base +
315  offsetof(struct shmem_lfa, additional_config)) &
317 
318  if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
319  DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
320  cur_req_fc_auto_adv, params->req_fc_auto_adv);
321  return LFA_FLOW_CTRL_MISMATCH;
322  }
323 
324  eee_status = REG_RD(bp, params->shmem2_base +
325  offsetof(struct shmem2_region,
326  eee_status[params->port]));
327 
328  if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
329  (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
330  ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
331  (params->eee_mode & EEE_MODE_ADV_LPI))) {
332  DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
333  eee_status);
334  return LFA_EEE_MISMATCH;
335  }
336 
337  /* LFA conditions are met */
338  return 0;
339 }
340 /******************************************************************/
341 /* EPIO/GPIO section */
342 /******************************************************************/
343 static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
344 {
345  u32 epio_mask, gp_oenable;
346  *en = 0;
347  /* Sanity check */
348  if (epio_pin > 31) {
349  DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
350  return;
351  }
352 
353  epio_mask = 1 << epio_pin;
354  /* Set this EPIO to output */
355  gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
356  REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
357 
358  *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
359 }
360 static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
361 {
362  u32 epio_mask, gp_output, gp_oenable;
363 
364  /* Sanity check */
365  if (epio_pin > 31) {
366  DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
367  return;
368  }
369  DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
370  epio_mask = 1 << epio_pin;
371  /* Set this EPIO to output */
372  gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
373  if (en)
374  gp_output |= epio_mask;
375  else
376  gp_output &= ~epio_mask;
377 
378  REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
379 
380  /* Set the value for this EPIO */
381  gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
382  REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
383 }
384 
385 static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
386 {
387  if (pin_cfg == PIN_CFG_NA)
388  return;
389  if (pin_cfg >= PIN_CFG_EPIO0) {
390  bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
391  } else {
392  u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
393  u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
394  bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
395  }
396 }
397 
398 static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
399 {
400  if (pin_cfg == PIN_CFG_NA)
401  return -EINVAL;
402  if (pin_cfg >= PIN_CFG_EPIO0) {
403  bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
404  } else {
405  u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
406  u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
407  *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
408  }
409  return 0;
410 
411 }
412 /******************************************************************/
413 /* ETS section */
414 /******************************************************************/
415 static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
416 {
417  /* ETS disabled configuration*/
418  struct bnx2x *bp = params->bp;
419 
420  DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
421 
422  /* mapping between entry priority to client number (0,1,2 -debug and
423  * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
424  * 3bits client num.
425  * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
426  * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
427  */
428 
430  /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
431  * as strict. Bits 0,1,2 - debug and management entries, 3 -
432  * COS0 entry, 4 - COS1 entry.
433  * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
434  * bit4 bit3 bit2 bit1 bit0
435  * MCP and debug are strict
436  */
437 
439  /* defines which entries (clients) are subjected to WFQ arbitration */
441  /* For strict priority entries defines the number of consecutive
442  * slots for the highest priority.
443  */
445  /* mapping between the CREDIT_WEIGHT registers and actual client
446  * numbers
447  */
451 
455  /* ETS mode disable */
456  REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
457  /* If ETS mode is enabled (there is no strict priority) defines a WFQ
458  * weight for COS0/COS1.
459  */
460  REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
461  REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
462  /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
463  REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
464  REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
465  /* Defines the number of consecutive slots for the strict priority */
467 }
468 /******************************************************************************
469 * Description:
470 * Getting min_w_val will be set according to line speed .
471 *.
472 ******************************************************************************/
473 static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
474 {
475  u32 min_w_val = 0;
476  /* Calculate min_w_val.*/
477  if (vars->link_up) {
478  if (vars->line_speed == SPEED_20000)
479  min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
480  else
482  } else
483  min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
484  /* If the link isn't up (static configuration for example ) The
485  * link will be according to 20GBPS.
486  */
487  return min_w_val;
488 }
489 /******************************************************************************
490 * Description:
491 * Getting credit upper bound form min_w_val.
492 *.
493 ******************************************************************************/
494 static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
495 {
496  const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
498  return credit_upper_bound;
499 }
500 /******************************************************************************
501 * Description:
502 * Set credit upper bound for NIG.
503 *.
504 ******************************************************************************/
505 static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
506  const struct link_params *params,
507  const u32 min_w_val)
508 {
509  struct bnx2x *bp = params->bp;
510  const u8 port = params->port;
511  const u32 credit_upper_bound =
512  bnx2x_ets_get_credit_upper_bound(min_w_val);
513 
515  NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
517  NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
519  NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
521  NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
523  NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
525  NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
526 
527  if (!port) {
529  credit_upper_bound);
531  credit_upper_bound);
533  credit_upper_bound);
534  }
535 }
536 /******************************************************************************
537 * Description:
538 * Will return the NIG ETS registers to init values.Except
539 * credit_upper_bound.
540 * That isn't used in this configuration (No WFQ is enabled) and will be
541 * configured acording to spec
542 *.
543 ******************************************************************************/
544 static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
545  const struct link_vars *vars)
546 {
547  struct bnx2x *bp = params->bp;
548  const u8 port = params->port;
549  const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
550  /* Mapping between entry priority to client number (0,1,2 -debug and
551  * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
552  * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
553  * reset value or init tool
554  */
555  if (port) {
558  } else {
561  }
562  /* For strict priority entries defines the number of consecutive
563  * slots for the highest priority.
564  */
567  /* Mapping between the CREDIT_WEIGHT registers and actual client
568  * numbers
569  */
570  if (port) {
571  /*Port 1 has 6 COS*/
574  } else {
575  /*Port 0 has 9 COS*/
577  0x43210876);
579  }
580 
581  /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
582  * as strict. Bits 0,1,2 - debug and management entries, 3 -
583  * COS0 entry, 4 - COS1 entry.
584  * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
585  * bit4 bit3 bit2 bit1 bit0
586  * MCP and debug are strict
587  */
588  if (port)
590  else
592  /* defines which entries (clients) are subjected to WFQ arbitration */
595 
596  /* Please notice the register address are note continuous and a
597  * for here is note appropriate.In 2 port mode port0 only COS0-5
598  * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
599  * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
600  * are never used for WFQ
601  */
614  if (!port) {
618  }
619 
620  bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
621 }
622 /******************************************************************************
623 * Description:
624 * Set credit upper bound for PBF.
625 *.
626 ******************************************************************************/
627 static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
628  const struct link_params *params,
629  const u32 min_w_val)
630 {
631  struct bnx2x *bp = params->bp;
632  const u32 credit_upper_bound =
633  bnx2x_ets_get_credit_upper_bound(min_w_val);
634  const u8 port = params->port;
635  u32 base_upper_bound = 0;
636  u8 max_cos = 0;
637  u8 i = 0;
638  /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
639  * port mode port1 has COS0-2 that can be used for WFQ.
640  */
641  if (!port) {
642  base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
643  max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
644  } else {
645  base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
646  max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
647  }
648 
649  for (i = 0; i < max_cos; i++)
650  REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
651 }
652 
653 /******************************************************************************
654 * Description:
655 * Will return the PBF ETS registers to init values.Except
656 * credit_upper_bound.
657 * That isn't used in this configuration (No WFQ is enabled) and will be
658 * configured acording to spec
659 *.
660 ******************************************************************************/
661 static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
662 {
663  struct bnx2x *bp = params->bp;
664  const u8 port = params->port;
665  const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
666  u8 i = 0;
667  u32 base_weight = 0;
668  u8 max_cos = 0;
669 
670  /* Mapping between entry priority to client number 0 - COS0
671  * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
672  * TODO_ETS - Should be done by reset value or init tool
673  */
674  if (port)
675  /* 0x688 (|011|0 10|00 1|000) */
677  else
678  /* (10 1|100 |011|0 10|00 1|000) */
680 
681  /* TODO_ETS - Should be done by reset value or init tool */
682  if (port)
683  /* 0x688 (|011|0 10|00 1|000)*/
685  else
686  /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
688 
691 
692 
695 
698  /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
699  * In 4 port mode port1 has COS0-2 that can be used for WFQ.
700  */
701  if (!port) {
702  base_weight = PBF_REG_COS0_WEIGHT_P0;
703  max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
704  } else {
705  base_weight = PBF_REG_COS0_WEIGHT_P1;
706  max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
707  }
708 
709  for (i = 0; i < max_cos; i++)
710  REG_WR(bp, base_weight + (0x4 * i), 0);
711 
712  bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
713 }
714 /******************************************************************************
715 * Description:
716 * E3B0 disable will return basicly the values to init values.
717 *.
718 ******************************************************************************/
719 static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
720  const struct link_vars *vars)
721 {
722  struct bnx2x *bp = params->bp;
723 
724  if (!CHIP_IS_E3B0(bp)) {
726  "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
727  return -EINVAL;
728  }
729 
730  bnx2x_ets_e3b0_nig_disabled(params, vars);
731 
732  bnx2x_ets_e3b0_pbf_disabled(params);
733 
734  return 0;
735 }
736 
737 /******************************************************************************
738 * Description:
739 * Disable will return basicly the values to init values.
740 *
741 ******************************************************************************/
742 int bnx2x_ets_disabled(struct link_params *params,
743  struct link_vars *vars)
744 {
745  struct bnx2x *bp = params->bp;
746  int bnx2x_status = 0;
747 
748  if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
749  bnx2x_ets_e2e3a0_disabled(params);
750  else if (CHIP_IS_E3B0(bp))
751  bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
752  else {
753  DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
754  return -EINVAL;
755  }
756 
757  return bnx2x_status;
758 }
759 
760 /******************************************************************************
761 * Description
762 * Set the COS mappimg to SP and BW until this point all the COS are not
763 * set as SP or BW.
764 ******************************************************************************/
765 static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
766  const struct bnx2x_ets_params *ets_params,
767  const u8 cos_sp_bitmap,
768  const u8 cos_bw_bitmap)
769 {
770  struct bnx2x *bp = params->bp;
771  const u8 port = params->port;
772  const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
773  const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
774  const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
775  const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
776 
778  NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
779 
781  PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap);
782 
785  nig_cli_subject2wfq_bitmap);
786 
789  pbf_cli_subject2wfq_bitmap);
790 
791  return 0;
792 }
793 
794 /******************************************************************************
795 * Description:
796 * This function is needed because NIG ARB_CREDIT_WEIGHT_X are
797 * not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
798 ******************************************************************************/
799 static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
800  const u8 cos_entry,
801  const u32 min_w_val_nig,
802  const u32 min_w_val_pbf,
803  const u16 total_bw,
804  const u8 bw,
805  const u8 port)
806 {
807  u32 nig_reg_adress_crd_weight = 0;
808  u32 pbf_reg_adress_crd_weight = 0;
809  /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
810  const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
811  const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
812 
813  switch (cos_entry) {
814  case 0:
815  nig_reg_adress_crd_weight =
818  pbf_reg_adress_crd_weight = (port) ?
820  break;
821  case 1:
822  nig_reg_adress_crd_weight = (port) ?
825  pbf_reg_adress_crd_weight = (port) ?
827  break;
828  case 2:
829  nig_reg_adress_crd_weight = (port) ?
832 
833  pbf_reg_adress_crd_weight = (port) ?
835  break;
836  case 3:
837  if (port)
838  return -EINVAL;
839  nig_reg_adress_crd_weight =
841  pbf_reg_adress_crd_weight =
843  break;
844  case 4:
845  if (port)
846  return -EINVAL;
847  nig_reg_adress_crd_weight =
849  pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
850  break;
851  case 5:
852  if (port)
853  return -EINVAL;
854  nig_reg_adress_crd_weight =
856  pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
857  break;
858  }
859 
860  REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
861 
862  REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
863 
864  return 0;
865 }
866 /******************************************************************************
867 * Description:
868 * Calculate the total BW.A value of 0 isn't legal.
869 *
870 ******************************************************************************/
871 static int bnx2x_ets_e3b0_get_total_bw(
872  const struct link_params *params,
873  struct bnx2x_ets_params *ets_params,
874  u16 *total_bw)
875 {
876  struct bnx2x *bp = params->bp;
877  u8 cos_idx = 0;
878  u8 is_bw_cos_exist = 0;
879 
880  *total_bw = 0 ;
881  /* Calculate total BW requested */
882  for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
883  if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
884  is_bw_cos_exist = 1;
885  if (!ets_params->cos[cos_idx].params.bw_params.bw) {
886  DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
887  "was set to 0\n");
888  /* This is to prevent a state when ramrods
889  * can't be sent
890  */
891  ets_params->cos[cos_idx].params.bw_params.bw
892  = 1;
893  }
894  *total_bw +=
895  ets_params->cos[cos_idx].params.bw_params.bw;
896  }
897  }
898 
899  /* Check total BW is valid */
900  if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
901  if (*total_bw == 0) {
903  "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
904  return -EINVAL;
905  }
907  "bnx2x_ets_E3B0_config total BW should be 100\n");
908  /* We can handle a case whre the BW isn't 100 this can happen
909  * if the TC are joined.
910  */
911  }
912  return 0;
913 }
914 
915 /******************************************************************************
916 * Description:
917 * Invalidate all the sp_pri_to_cos.
918 *
919 ******************************************************************************/
920 static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
921 {
922  u8 pri = 0;
923  for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
924  sp_pri_to_cos[pri] = DCBX_INVALID_COS;
925 }
926 /******************************************************************************
927 * Description:
928 * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
929 * according to sp_pri_to_cos.
930 *
931 ******************************************************************************/
932 static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
933  u8 *sp_pri_to_cos, const u8 pri,
934  const u8 cos_entry)
935 {
936  struct bnx2x *bp = params->bp;
937  const u8 port = params->port;
938  const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
940 
941  if (pri >= max_num_of_cos) {
942  DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
943  "parameter Illegal strict priority\n");
944  return -EINVAL;
945  }
946 
947  if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
948  DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
949  "parameter There can't be two COS's with "
950  "the same strict pri\n");
951  return -EINVAL;
952  }
953 
954  sp_pri_to_cos[pri] = cos_entry;
955  return 0;
956 
957 }
958 
959 /******************************************************************************
960 * Description:
961 * Returns the correct value according to COS and priority in
962 * the sp_pri_cli register.
963 *
964 ******************************************************************************/
965 static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
966  const u8 pri_set,
967  const u8 pri_offset,
968  const u8 entry_size)
969 {
970  u64 pri_cli_nig = 0;
971  pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
972  (pri_set + pri_offset));
973 
974  return pri_cli_nig;
975 }
976 /******************************************************************************
977 * Description:
978 * Returns the correct value according to COS and priority in the
979 * sp_pri_cli register for NIG.
980 *
981 ******************************************************************************/
982 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
983 {
984  /* MCP Dbg0 and dbg1 are always with higher strict pri*/
985  const u8 nig_cos_offset = 3;
986  const u8 nig_pri_offset = 3;
987 
988  return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
989  nig_pri_offset, 4);
990 
991 }
992 /******************************************************************************
993 * Description:
994 * Returns the correct value according to COS and priority in the
995 * sp_pri_cli register for PBF.
996 *
997 ******************************************************************************/
998 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
999 {
1000  const u8 pbf_cos_offset = 0;
1001  const u8 pbf_pri_offset = 0;
1002 
1003  return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
1004  pbf_pri_offset, 3);
1005 
1006 }
1007 
1008 /******************************************************************************
1009 * Description:
1010 * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
1011 * according to sp_pri_to_cos.(which COS has higher priority)
1012 *
1013 ******************************************************************************/
1014 static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
1015  u8 *sp_pri_to_cos)
1016 {
1017  struct bnx2x *bp = params->bp;
1018  u8 i = 0;
1019  const u8 port = params->port;
1020  /* MCP Dbg0 and dbg1 are always with higher strict pri*/
1021  u64 pri_cli_nig = 0x210;
1022  u32 pri_cli_pbf = 0x0;
1023  u8 pri_set = 0;
1024  u8 pri_bitmask = 0;
1025  const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
1027 
1028  u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
1029 
1030  /* Set all the strict priority first */
1031  for (i = 0; i < max_num_of_cos; i++) {
1032  if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
1033  if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
1035  "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
1036  "invalid cos entry\n");
1037  return -EINVAL;
1038  }
1039 
1040  pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
1041  sp_pri_to_cos[i], pri_set);
1042 
1043  pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
1044  sp_pri_to_cos[i], pri_set);
1045  pri_bitmask = 1 << sp_pri_to_cos[i];
1046  /* COS is used remove it from bitmap.*/
1047  if (!(pri_bitmask & cos_bit_to_set)) {
1049  "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
1050  "invalid There can't be two COS's with"
1051  " the same strict pri\n");
1052  return -EINVAL;
1053  }
1054  cos_bit_to_set &= ~pri_bitmask;
1055  pri_set++;
1056  }
1057  }
1058 
1059  /* Set all the Non strict priority i= COS*/
1060  for (i = 0; i < max_num_of_cos; i++) {
1061  pri_bitmask = 1 << i;
1062  /* Check if COS was already used for SP */
1063  if (pri_bitmask & cos_bit_to_set) {
1064  /* COS wasn't used for SP */
1065  pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
1066  i, pri_set);
1067 
1068  pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
1069  i, pri_set);
1070  /* COS is used remove it from bitmap.*/
1071  cos_bit_to_set &= ~pri_bitmask;
1072  pri_set++;
1073  }
1074  }
1075 
1076  if (pri_set != max_num_of_cos) {
1077  DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
1078  "entries were set\n");
1079  return -EINVAL;
1080  }
1081 
1082  if (port) {
1083  /* Only 6 usable clients*/
1085  (u32)pri_cli_nig);
1086 
1087  REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf);
1088  } else {
1089  /* Only 9 usable clients*/
1090  const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
1091  const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
1092 
1094  pri_cli_nig_lsb);
1096  pri_cli_nig_msb);
1097 
1098  REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf);
1099  }
1100  return 0;
1101 }
1102 
1103 /******************************************************************************
1104 * Description:
1105 * Configure the COS to ETS according to BW and SP settings.
1106 ******************************************************************************/
1107 int bnx2x_ets_e3b0_config(const struct link_params *params,
1108  const struct link_vars *vars,
1109  struct bnx2x_ets_params *ets_params)
1110 {
1111  struct bnx2x *bp = params->bp;
1112  int bnx2x_status = 0;
1113  const u8 port = params->port;
1114  u16 total_bw = 0;
1115  const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
1116  const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
1117  u8 cos_bw_bitmap = 0;
1118  u8 cos_sp_bitmap = 0;
1119  u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
1120  const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
1122  u8 cos_entry = 0;
1123 
1124  if (!CHIP_IS_E3B0(bp)) {
1126  "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
1127  return -EINVAL;
1128  }
1129 
1130  if ((ets_params->num_of_cos > max_num_of_cos)) {
1131  DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
1132  "isn't supported\n");
1133  return -EINVAL;
1134  }
1135 
1136  /* Prepare sp strict priority parameters*/
1137  bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
1138 
1139  /* Prepare BW parameters*/
1140  bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
1141  &total_bw);
1142  if (bnx2x_status) {
1144  "bnx2x_ets_E3B0_config get_total_bw failed\n");
1145  return -EINVAL;
1146  }
1147 
1148  /* Upper bound is set according to current link speed (min_w_val
1149  * should be the same for upper bound and COS credit val).
1150  */
1151  bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
1152  bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
1153 
1154 
1155  for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
1156  if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
1157  cos_bw_bitmap |= (1 << cos_entry);
1158  /* The function also sets the BW in HW(not the mappin
1159  * yet)
1160  */
1161  bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
1162  bp, cos_entry, min_w_val_nig, min_w_val_pbf,
1163  total_bw,
1164  ets_params->cos[cos_entry].params.bw_params.bw,
1165  port);
1166  } else if (bnx2x_cos_state_strict ==
1167  ets_params->cos[cos_entry].state){
1168  cos_sp_bitmap |= (1 << cos_entry);
1169 
1170  bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
1171  params,
1172  sp_pri_to_cos,
1173  ets_params->cos[cos_entry].params.sp_params.pri,
1174  cos_entry);
1175 
1176  } else {
1178  "bnx2x_ets_e3b0_config cos state not valid\n");
1179  return -EINVAL;
1180  }
1181  if (bnx2x_status) {
1183  "bnx2x_ets_e3b0_config set cos bw failed\n");
1184  return bnx2x_status;
1185  }
1186  }
1187 
1188  /* Set SP register (which COS has higher priority) */
1189  bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
1190  sp_pri_to_cos);
1191 
1192  if (bnx2x_status) {
1194  "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
1195  return bnx2x_status;
1196  }
1197 
1198  /* Set client mapping of BW and strict */
1199  bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
1200  cos_sp_bitmap,
1201  cos_bw_bitmap);
1202 
1203  if (bnx2x_status) {
1204  DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
1205  return bnx2x_status;
1206  }
1207  return 0;
1208 }
1209 static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1210 {
1211  /* ETS disabled configuration */
1212  struct bnx2x *bp = params->bp;
1213  DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1214  /* Defines which entries (clients) are subjected to WFQ arbitration
1215  * COS0 0x8
1216  * COS1 0x10
1217  */
1219  /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
1220  * client numbers (WEIGHT_0 does not actually have to represent
1221  * client 0)
1222  * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
1223  * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
1224  */
1226 
1231 
1232  /* ETS mode enabled*/
1233  REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
1234 
1235  /* Defines the number of consecutive slots for the strict priority */
1237  /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1238  * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
1239  * entry, 4 - COS1 entry.
1240  * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
1241  * bit4 bit3 bit2 bit1 bit0
1242  * MCP and debug are strict
1243  */
1245 
1246  /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
1251 }
1252 
1253 void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
1254  const u32 cos1_bw)
1255 {
1256  /* ETS disabled configuration*/
1257  struct bnx2x *bp = params->bp;
1258  const u32 total_bw = cos0_bw + cos1_bw;
1259  u32 cos0_credit_weight = 0;
1260  u32 cos1_credit_weight = 0;
1261 
1262  DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1263 
1264  if ((!total_bw) ||
1265  (!cos0_bw) ||
1266  (!cos1_bw)) {
1267  DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
1268  return;
1269  }
1270 
1271  cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
1272  total_bw;
1273  cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
1274  total_bw;
1275 
1276  bnx2x_ets_bw_limit_common(params);
1277 
1278  REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
1279  REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
1280 
1281  REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
1282  REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
1283 }
1284 
1285 int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1286 {
1287  /* ETS disabled configuration*/
1288  struct bnx2x *bp = params->bp;
1289  u32 val = 0;
1290 
1291  DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
1292  /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1293  * as strict. Bits 0,1,2 - debug and management entries,
1294  * 3 - COS0 entry, 4 - COS1 entry.
1295  * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
1296  * bit4 bit3 bit2 bit1 bit0
1297  * MCP and debug are strict
1298  */
1300  /* For strict priority entries defines the number of consecutive slots
1301  * for the highest priority.
1302  */
1304  /* ETS mode disable */
1305  REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
1306  /* Defines the number of consecutive slots for the strict priority */
1308 
1309  /* Defines the number of consecutive slots for the strict priority */
1310  REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
1311 
1312  /* Mapping between entry priority to client number (0,1,2 -debug and
1313  * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
1314  * 3bits client num.
1315  * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
1316  * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
1317  * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
1318  */
1319  val = (!strict_cos) ? 0x2318 : 0x22E0;
1321 
1322  return 0;
1323 }
1324 
1325 /******************************************************************/
1326 /* PFC section */
1327 /******************************************************************/
1328 static void bnx2x_update_pfc_xmac(struct link_params *params,
1329  struct link_vars *vars,
1330  u8 is_lb)
1331 {
1332  struct bnx2x *bp = params->bp;
1333  u32 xmac_base;
1334  u32 pause_val, pfc0_val, pfc1_val;
1335 
1336  /* XMAC base adrr */
1337  xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1338 
1339  /* Initialize pause and pfc registers */
1340  pause_val = 0x18000;
1341  pfc0_val = 0xFFFF8000;
1342  pfc1_val = 0x2;
1343 
1344  /* No PFC support */
1345  if (!(params->feature_config_flags &
1347 
1348  /* RX flow control - Process pause frame in receive direction
1349  */
1350  if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1351  pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
1352 
1353  /* TX flow control - Send pause packet when buffer is full */
1354  if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1355  pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
1356  } else {/* PFC support */
1362  /* Write pause and PFC registers */
1363  REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
1364  REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
1365  REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
1367 
1368  }
1369 
1370  /* Write pause and PFC registers */
1371  REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
1372  REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
1373  REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
1374 
1375 
1376  /* Set MAC address for source TX Pause/PFC frames */
1377  REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
1378  ((params->mac_addr[2] << 24) |
1379  (params->mac_addr[3] << 16) |
1380  (params->mac_addr[4] << 8) |
1381  (params->mac_addr[5])));
1382  REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
1383  ((params->mac_addr[0] << 8) |
1384  (params->mac_addr[1])));
1385 
1386  udelay(30);
1387 }
1388 
1389 
1390 static void bnx2x_emac_get_pfc_stat(struct link_params *params,
1391  u32 pfc_frames_sent[2],
1392  u32 pfc_frames_received[2])
1393 {
1394  /* Read pfc statistic */
1395  struct bnx2x *bp = params->bp;
1396  u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1397  u32 val_xon = 0;
1398  u32 val_xoff = 0;
1399 
1400  DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
1401 
1402  /* PFC received frames */
1403  val_xoff = REG_RD(bp, emac_base +
1406  val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
1408 
1409  pfc_frames_received[0] = val_xon + val_xoff;
1410 
1411  /* PFC received sent */
1412  val_xoff = REG_RD(bp, emac_base +
1415  val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
1417 
1418  pfc_frames_sent[0] = val_xon + val_xoff;
1419 }
1420 
1421 /* Read pfc statistic*/
1422 void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
1423  u32 pfc_frames_sent[2],
1424  u32 pfc_frames_received[2])
1425 {
1426  /* Read pfc statistic */
1427  struct bnx2x *bp = params->bp;
1428 
1429  DP(NETIF_MSG_LINK, "pfc statistic\n");
1430 
1431  if (!vars->link_up)
1432  return;
1433 
1434  if (vars->mac_type == MAC_TYPE_EMAC) {
1435  DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
1436  bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
1437  pfc_frames_received);
1438  }
1439 }
1440 /******************************************************************/
1441 /* MAC/PBF section */
1442 /******************************************************************/
1443 static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
1444 {
1445  u32 mode, emac_base;
1446  /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1447  * (a value of 49==0x31) and make sure that the AUTO poll is off
1448  */
1449 
1450  if (CHIP_IS_E2(bp))
1451  emac_base = GRCBASE_EMAC0;
1452  else
1453  emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1454  mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1455  mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
1457  if (USES_WARPCORE(bp))
1458  mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1459  else
1460  mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1461 
1462  mode |= (EMAC_MDIO_MODE_CLAUSE_45);
1463  REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
1464 
1465  udelay(40);
1466 }
1467 static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
1468 {
1469  u32 port4mode_ovwr_val;
1470  /* Check 4-port override enabled */
1471  port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
1472  if (port4mode_ovwr_val & (1<<0)) {
1473  /* Return 4-port mode override value */
1474  return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
1475  }
1476  /* Return 4-port mode from input pin */
1477  return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
1478 }
1479 
1480 static void bnx2x_emac_init(struct link_params *params,
1481  struct link_vars *vars)
1482 {
1483  /* reset and unreset the emac core */
1484  struct bnx2x *bp = params->bp;
1485  u8 port = params->port;
1486  u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1487  u32 val;
1488  u16 timeout;
1489 
1492  udelay(5);
1495 
1496  /* init emac - use read-modify-write */
1497  /* self clear reset */
1498  val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1500 
1501  timeout = 200;
1502  do {
1503  val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1504  DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
1505  if (!timeout) {
1506  DP(NETIF_MSG_LINK, "EMAC timeout!\n");
1507  return;
1508  }
1509  timeout--;
1510  } while (val & EMAC_MODE_RESET);
1511  bnx2x_set_mdio_clk(bp, params->chip_id, port);
1512  /* Set mac address */
1513  val = ((params->mac_addr[0] << 8) |
1514  params->mac_addr[1]);
1515  EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
1516 
1517  val = ((params->mac_addr[2] << 24) |
1518  (params->mac_addr[3] << 16) |
1519  (params->mac_addr[4] << 8) |
1520  params->mac_addr[5]);
1521  EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
1522 }
1523 
1524 static void bnx2x_set_xumac_nig(struct link_params *params,
1525  u16 tx_pause_en,
1526  u8 enable)
1527 {
1528  struct bnx2x *bp = params->bp;
1529 
1531  enable);
1533  enable);
1534  REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
1535  NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
1536 }
1537 
1538 static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
1539 {
1540  u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1541  u32 val;
1542  struct bnx2x *bp = params->bp;
1543  if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
1544  (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
1545  return;
1546  val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
1547  if (en)
1550  else
1553  /* Disable RX and TX */
1554  REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1555 }
1556 
1557 static void bnx2x_umac_enable(struct link_params *params,
1558  struct link_vars *vars, u8 lb)
1559 {
1560  u32 val;
1561  u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1562  struct bnx2x *bp = params->bp;
1563  /* Reset UMAC */
1565  (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
1566  usleep_range(1000, 2000);
1567 
1569  (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
1570 
1571  DP(NETIF_MSG_LINK, "enabling UMAC\n");
1572 
1573  /* This register opens the gate for the UMAC despite its name */
1574  REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
1575 
1580  switch (vars->line_speed) {
1581  case SPEED_10:
1582  val |= (0<<2);
1583  break;
1584  case SPEED_100:
1585  val |= (1<<2);
1586  break;
1587  case SPEED_1000:
1588  val |= (2<<2);
1589  break;
1590  case SPEED_2500:
1591  val |= (3<<2);
1592  break;
1593  default:
1594  DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
1595  vars->line_speed);
1596  break;
1597  }
1598  if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1600 
1601  if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1603 
1604  if (vars->duplex == DUPLEX_HALF)
1606 
1607  REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1608  udelay(50);
1609 
1610  /* Configure UMAC for EEE */
1611  if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
1612  DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
1613  REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
1615  REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
1616  } else {
1617  REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
1618  }
1619 
1620  /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
1621  REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
1622  ((params->mac_addr[2] << 24) |
1623  (params->mac_addr[3] << 16) |
1624  (params->mac_addr[4] << 8) |
1625  (params->mac_addr[5])));
1626  REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
1627  ((params->mac_addr[0] << 8) |
1628  (params->mac_addr[1])));
1629 
1630  /* Enable RX and TX */
1634  REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1635  udelay(50);
1636 
1637  /* Remove SW Reset */
1639 
1640  /* Check loopback mode */
1641  if (lb)
1643  REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1644 
1645  /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1646  * length used by the MAC receive logic to check frames.
1647  */
1648  REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
1649  bnx2x_set_xumac_nig(params,
1650  ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
1651  vars->mac_type = MAC_TYPE_UMAC;
1652 
1653 }
1654 
1655 /* Define the XMAC mode */
1656 static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1657 {
1658  struct bnx2x *bp = params->bp;
1659  u32 is_port4mode = bnx2x_is_4_port_mode(bp);
1660 
1661  /* In 4-port mode, need to set the mode only once, so if XMAC is
1662  * already out of reset, it means the mode has already been set,
1663  * and it must not* reset the XMAC again, since it controls both
1664  * ports of the path
1665  */
1666 
1667  if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
1671  "XMAC already out of reset in 4-port mode\n");
1672  return;
1673  }
1674 
1675  /* Hard reset */
1677  MISC_REGISTERS_RESET_REG_2_XMAC);
1678  usleep_range(1000, 2000);
1679 
1681  MISC_REGISTERS_RESET_REG_2_XMAC);
1682  if (is_port4mode) {
1683  DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
1684 
1685  /* Set the number of ports on the system side to up to 2 */
1687 
1688  /* Set the number of ports on the Warp Core to 10G */
1690  } else {
1691  /* Set the number of ports on the system side to 1 */
1693  if (max_speed == SPEED_10000) {
1695  "Init XMAC to 10G x 1 port per path\n");
1696  /* Set the number of ports on the Warp Core to 10G */
1698  } else {
1700  "Init XMAC to 20G x 2 ports per path\n");
1701  /* Set the number of ports on the Warp Core to 20G */
1703  }
1704  }
1705  /* Soft reset */
1708  usleep_range(1000, 2000);
1709 
1712 
1713 }
1714 
1715 static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
1716 {
1717  u8 port = params->port;
1718  struct bnx2x *bp = params->bp;
1719  u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1720  u32 val;
1721 
1722  if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1724  /* Send an indication to change the state in the NIG back to XON
1725  * Clearing this bit enables the next set of this bit to get
1726  * rising edge
1727  */
1728  pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
1729  REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1730  (pfc_ctrl & ~(1<<1)));
1731  REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1732  (pfc_ctrl | (1<<1)));
1733  DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
1734  val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
1735  if (en)
1737  else
1739  REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1740  }
1741 }
1742 
1743 static int bnx2x_xmac_enable(struct link_params *params,
1744  struct link_vars *vars, u8 lb)
1745 {
1746  u32 val, xmac_base;
1747  struct bnx2x *bp = params->bp;
1748  DP(NETIF_MSG_LINK, "enabling XMAC\n");
1749 
1750  xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1751 
1752  bnx2x_xmac_init(params, vars->line_speed);
1753 
1754  /* This register determines on which events the MAC will assert
1755  * error on the i/f to the NIG along w/ EOP.
1756  */
1757 
1758  /* This register tells the NIG whether to send traffic to UMAC
1759  * or XMAC
1760  */
1761  REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
1762 
1763  /* Set Max packet size */
1764  REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
1765 
1766  /* CRC append for Tx packets */
1767  REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
1768 
1769  /* update PFC */
1770  bnx2x_update_pfc_xmac(params, vars, 0);
1771 
1772  if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
1773  DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
1774  REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
1775  REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
1776  } else {
1777  REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
1778  }
1779 
1780  /* Enable TX and RX */
1782 
1783  /* Check loopback mode */
1784  if (lb)
1786  REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1787  bnx2x_set_xumac_nig(params,
1788  ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
1789 
1790  vars->mac_type = MAC_TYPE_XMAC;
1791 
1792  return 0;
1793 }
1794 
1795 static int bnx2x_emac_enable(struct link_params *params,
1796  struct link_vars *vars, u8 lb)
1797 {
1798  struct bnx2x *bp = params->bp;
1799  u8 port = params->port;
1800  u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1801  u32 val;
1802 
1803  DP(NETIF_MSG_LINK, "enabling EMAC\n");
1804 
1805  /* Disable BMAC */
1808 
1809  /* enable emac and not bmac */
1810  REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
1811 
1812  /* ASIC */
1813  if (vars->phy_flags & PHY_XGXS_FLAG) {
1814  u32 ser_lane = ((params->lane_config &
1817 
1818  DP(NETIF_MSG_LINK, "XGXS\n");
1819  /* select the master lanes (out of 0-3) */
1820  REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
1821  /* select XGXS */
1822  REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1823 
1824  } else { /* SerDes */
1825  DP(NETIF_MSG_LINK, "SerDes\n");
1826  /* select SerDes */
1827  REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1828  }
1829 
1830  bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
1832  bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1834 
1835  /* pause enable/disable */
1836  bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
1838 
1839  bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1842  if (!(params->feature_config_flags &
1844  if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1845  bnx2x_bits_en(bp, emac_base +
1848 
1849  if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1850  bnx2x_bits_en(bp, emac_base +
1854  } else
1855  bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
1857 
1858  /* KEEP_VLAN_TAG, promiscuous */
1859  val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
1861 
1862  /* Setting this bit causes MAC control frames (except for pause
1863  * frames) to be passed on for processing. This setting has no
1864  * affect on the operation of the pause frames. This bit effects
1865  * all packets regardless of RX Parser packet sorting logic.
1866  * Turn the PFC off to make sure we are in Xon state before
1867  * enabling it.
1868  */
1869  EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
1871  DP(NETIF_MSG_LINK, "PFC is enabled\n");
1872  /* Enable PFC again */
1877 
1879  ((0x0101 <<
1881  (0x00ff <<
1884  }
1885  EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
1886 
1887  /* Set Loopback */
1888  val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1889  if (lb)
1890  val |= 0x810;
1891  else
1892  val &= ~0x810;
1893  EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
1894 
1895  /* Enable emac */
1896  REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
1897 
1898  /* Enable emac for jumbo packets */
1902 
1903  /* Strip CRC */
1904  REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
1905 
1906  /* Disable the NIG in/out to the bmac */
1907  REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
1908  REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
1909  REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
1910 
1911  /* Enable the NIG in/out to the emac */
1912  REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
1913  val = 0;
1914  if ((params->feature_config_flags &
1916  (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1917  val = 1;
1918 
1919  REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
1920  REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
1921 
1922  REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
1923 
1924  vars->mac_type = MAC_TYPE_EMAC;
1925  return 0;
1926 }
1927 
1928 static void bnx2x_update_pfc_bmac1(struct link_params *params,
1929  struct link_vars *vars)
1930 {
1931  u32 wb_data[2];
1932  struct bnx2x *bp = params->bp;
1933  u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
1935 
1936  u32 val = 0x14;
1937  if ((!(params->feature_config_flags &
1939  (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1940  /* Enable BigMAC to react on received Pause packets */
1941  val |= (1<<5);
1942  wb_data[0] = val;
1943  wb_data[1] = 0;
1944  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
1945 
1946  /* TX control */
1947  val = 0xc0;
1948  if (!(params->feature_config_flags &
1950  (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1951  val |= 0x800000;
1952  wb_data[0] = val;
1953  wb_data[1] = 0;
1954  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
1955 }
1956 
1957 static void bnx2x_update_pfc_bmac2(struct link_params *params,
1958  struct link_vars *vars,
1959  u8 is_lb)
1960 {
1961  /* Set rx control: Strip CRC and enable BigMAC to relay
1962  * control packets to the system as well
1963  */
1964  u32 wb_data[2];
1965  struct bnx2x *bp = params->bp;
1966  u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
1968  u32 val = 0x14;
1969 
1970  if ((!(params->feature_config_flags &
1972  (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1973  /* Enable BigMAC to react on received Pause packets */
1974  val |= (1<<5);
1975  wb_data[0] = val;
1976  wb_data[1] = 0;
1977  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
1978  udelay(30);
1979 
1980  /* Tx control */
1981  val = 0xc0;
1982  if (!(params->feature_config_flags &
1984  (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1985  val |= 0x800000;
1986  wb_data[0] = val;
1987  wb_data[1] = 0;
1988  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
1989 
1991  DP(NETIF_MSG_LINK, "PFC is enabled\n");
1992  /* Enable PFC RX & TX & STATS and set 8 COS */
1993  wb_data[0] = 0x0;
1994  wb_data[0] |= (1<<0); /* RX */
1995  wb_data[0] |= (1<<1); /* TX */
1996  wb_data[0] |= (1<<2); /* Force initial Xon */
1997  wb_data[0] |= (1<<3); /* 8 cos */
1998  wb_data[0] |= (1<<5); /* STATS */
1999  wb_data[1] = 0;
2000  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
2001  wb_data, 2);
2002  /* Clear the force Xon */
2003  wb_data[0] &= ~(1<<2);
2004  } else {
2005  DP(NETIF_MSG_LINK, "PFC is disabled\n");
2006  /* Disable PFC RX & TX & STATS and set 8 COS */
2007  wb_data[0] = 0x8;
2008  wb_data[1] = 0;
2009  }
2010 
2011  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
2012 
2013  /* Set Time (based unit is 512 bit time) between automatic
2014  * re-sending of PP packets amd enable automatic re-send of
2015  * Per-Priroity Packet as long as pp_gen is asserted and
2016  * pp_disable is low.
2017  */
2018  val = 0x8000;
2020  val |= (1<<16); /* enable automatic re-send */
2021 
2022  wb_data[0] = val;
2023  wb_data[1] = 0;
2025  wb_data, 2);
2026 
2027  /* mac control */
2028  val = 0x3; /* Enable RX and TX */
2029  if (is_lb) {
2030  val |= 0x4; /* Local loopback */
2031  DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2032  }
2033  /* When PFC enabled, Pass pause frames towards the NIG. */
2035  val |= ((1<<6)|(1<<5));
2036 
2037  wb_data[0] = val;
2038  wb_data[1] = 0;
2039  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
2040 }
2041 
2042 /******************************************************************************
2043 * Description:
2044 * This function is needed because NIG ARB_CREDIT_WEIGHT_X are
2045 * not contiguous and ARB_CREDIT_WEIGHT_0 + offset is suitable.
2046 ******************************************************************************/
/* Program the NIG RX COS priority mask for one COS entry on a port.
 * Returns 0 on success, or -EINVAL when a COS entry in the 3-5 range is
 * requested for port 1 (only NIG_REG_P0_* registers exist for those).
 * NOTE(review): the register constants for cases 0-2 appear to have been
 * dropped by text extraction - verify against the original source file.
 */
2047 static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
2048  u8 cos_entry,
2049  u32 priority_mask, u8 port)
2050 {
2051  u32 nig_reg_rx_priority_mask_add = 0;
2052 
 /* Select the per-port priority-mask register for this COS entry */
2053  switch (cos_entry) {
2054  case 0:
2055  nig_reg_rx_priority_mask_add = (port) ?
2058  break;
2059  case 1:
2060  nig_reg_rx_priority_mask_add = (port) ?
2063  break;
2064  case 2:
2065  nig_reg_rx_priority_mask_add = (port) ?
2068  break;
2069  case 3:
2070  if (port)
2071  return -EINVAL;
2072  nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
2073  break;
2074  case 4:
2075  if (port)
2076  return -EINVAL;
2077  nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
2078  break;
2079  case 5:
2080  if (port)
2081  return -EINVAL;
2082  nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
2083  break;
2084  }
2085 
 /* Write the caller-supplied priority mask to the selected register */
2086  REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
2087 
2088  return 0;
2089 }
2090 static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2091 {
2092  struct bnx2x *bp = params->bp;
2093 
2094  REG_WR(bp, params->shmem_base +
2095  offsetof(struct shmem_region,
2096  port_mb[params->port].link_status), link_status);
2097 }
2098 
/* Configure the NIG block for either PFC mode (per-priority flow control)
 * or the legacy PAUSE/SAFC mode, per port.  In PFC mode global pause and
 * LLFC are disabled and HW PFC is enabled; otherwise the nig_params
 * (when supplied) or a plain-PAUSE default is programmed.
 * NOTE(review): several register-constant continuation lines appear to
 * have been dropped by text extraction - verify against the original.
 */
2099 static void bnx2x_update_pfc_nig(struct link_params *params,
2100  struct link_vars *vars,
2101  struct bnx2x_nig_brb_pfc_port_params *nig_params)
2102 {
2103  u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
2104  u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
2105  u32 pkt_priority_to_cos = 0;
2106  struct bnx2x *bp = params->bp;
2107  u8 port = params->port;
2108 
2109  int set_pfc = params->feature_config_flags &
2111  DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
2112 
2113  /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2114  * MAC control frames (that are not pause packets)
2115  * will be forwarded to the XCM.
2116  */
2117  xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
2119  /* NIG params will override non PFC params, since it's possible to
2120  * do transition from PFC to SAFC
2121  */
2122  if (set_pfc) {
 /* PFC mode: pause/LLFC off, PPP on (except E3), HW PFC on */
2123  pause_enable = 0;
2124  llfc_out_en = 0;
2125  llfc_enable = 0;
2126  if (CHIP_IS_E3(bp))
2127  ppp_enable = 0;
2128  else
2129  ppp_enable = 1;
2130  xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
2132  xcm_out_en = 0;
2133  hwpfc_enable = 1;
2134  } else {
 /* Non-PFC mode: take values from nig_params, or default to PAUSE */
2135  if (nig_params) {
2136  llfc_out_en = nig_params->llfc_out_en;
2137  llfc_enable = nig_params->llfc_enable;
2138  pause_enable = nig_params->pause_enable;
2139  } else /* Default non PFC mode - PAUSE */
2140  pause_enable = 1;
2141 
2142  xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
2144  xcm_out_en = 1;
2145  }
2146 
 /* Program the computed values into the per-port NIG registers */
2147  if (CHIP_IS_E3(bp))
2148  REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
2149  NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
2150  REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
2151  NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
2152  REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
2153  NIG_REG_LLFC_ENABLE_0, llfc_enable);
2154  REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
2155  NIG_REG_PAUSE_ENABLE_0, pause_enable);
2156 
2157  REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
2158  NIG_REG_PPP_ENABLE_0, ppp_enable);
2159 
2160  REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
2161  NIG_REG_LLH0_XCM_MASK, xcm_mask);
2162 
2165 
2166  /* Output enable for RX_XCM # IF */
2167  REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
2168  NIG_REG_XCM0_OUT_EN, xcm_out_en);
2169 
2170  /* HW PFC TX enable */
2171  REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
2172  NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
2173 
2174  if (nig_params) {
2175  u8 i = 0;
2176  pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
2177 
 /* Program each RX COS priority mask supplied by the caller */
2178  for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
2179  bnx2x_pfc_nig_rx_priority_mask(bp, i,
2180  nig_params->rx_cos_priority_mask[i], port);
2181 
2184  nig_params->llfc_high_priority_classes);
2185 
2188  nig_params->llfc_low_priority_classes);
2189  }
2192  pkt_priority_to_cos);
2193 }
2194 
/* Public entry point: propagate the current PFC/pause configuration to
 * management firmware, the NIG block, and the active MAC (XMAC on E3,
 * otherwise EMAC/BMAC1/BMAC2 depending on chip and reset state).
 * Returns 0 on success (bnx2x_status is never set non-zero here).
 */
2195 int bnx2x_update_pfc(struct link_params *params,
2196  struct link_vars *vars,
2197  struct bnx2x_nig_brb_pfc_port_params *pfc_params)
2198 {
2199  /* The PFC and pause are orthogonal to one another, meaning when
2200  * PFC is enabled, the pause are disabled, and when PFC is
2201  * disabled, pause are set according to the pause result.
2202  */
2203  u32 val;
2204  struct bnx2x *bp = params->bp;
2205  int bnx2x_status = 0;
2206  u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
2207 
2210  else
2212 
 /* Publish link status to the management FW mailbox */
2213  bnx2x_update_mng(params, vars->link_status);
2214 
2215  /* Update NIG params */
2216  bnx2x_update_pfc_nig(params, vars, pfc_params);
2217 
 /* MAC-level updates only make sense while the link is up */
2218  if (!vars->link_up)
2219  return bnx2x_status;
2220 
2221  DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
2222 
2223  if (CHIP_IS_E3(bp)) {
2224  if (vars->mac_type == MAC_TYPE_XMAC)
2225  bnx2x_update_pfc_xmac(params, vars, 0);
2226  } else {
2227  val = REG_RD(bp, MISC_REG_RESET_REG_2);
2228  if ((val &
2230  == 0) {
 /* BMAC still in reset - fall back to the EMAC path */
2231  DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
2232  bnx2x_emac_enable(params, vars, 0);
2233  return bnx2x_status;
2234  }
2235  if (CHIP_IS_E2(bp))
2236  bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
2237  else
2238  bnx2x_update_pfc_bmac1(params, vars);
2239 
 /* Let pause frames out of the BMAC only when TX flow ctrl is on */
2240  val = 0;
2241  if ((params->feature_config_flags &
2243  (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
2244  val = 1;
2245  REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
2246  }
2247  return bnx2x_status;
2248 }
2249 
2250 
/* Bring up BigMAC1 (E1x chips): program XGXS control, TX source MAC
 * address, MAC control (with optional local loopback), RX/TX/CNT MTUs
 * and SAFC, then apply the current PFC configuration.  Returns 0.
 */
2251 static int bnx2x_bmac1_enable(struct link_params *params,
2252  struct link_vars *vars,
2253  u8 is_lb)
2254 {
2255  struct bnx2x *bp = params->bp;
2256  u8 port = params->port;
2257  u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2259  u32 wb_data[2];
2260  u32 val;
2261 
2262  DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
2263 
2264  /* XGXS control */
2265  wb_data[0] = 0x3c;
2266  wb_data[1] = 0;
2268  wb_data, 2);
2269 
2270  /* TX MAC SA */
 /* MAC address packed big-endian into two 32-bit words */
2271  wb_data[0] = ((params->mac_addr[2] << 24) |
2272  (params->mac_addr[3] << 16) |
2273  (params->mac_addr[4] << 8) |
2274  params->mac_addr[5]);
2275  wb_data[1] = ((params->mac_addr[0] << 8) |
2276  params->mac_addr[1]);
2277  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
2278 
2279  /* MAC control */
2280  val = 0x3;
2281  if (is_lb) {
2282  val |= 0x4;
2283  DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2284  }
2285  wb_data[0] = val;
2286  wb_data[1] = 0;
2287  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
2288 
2289  /* Set rx mtu */
2290  wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2291  wb_data[1] = 0;
2292  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
2293 
 /* Apply current PFC configuration to the freshly-enabled MAC */
2294  bnx2x_update_pfc_bmac1(params, vars);
2295 
2296  /* Set tx mtu */
2297  wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2298  wb_data[1] = 0;
2299  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
2300 
2301  /* Set cnt max size */
2302  wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2303  wb_data[1] = 0;
2304  REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
2305 
2306  /* Configure SAFC */
2307  wb_data[0] = 0x1000200;
2308  wb_data[1] = 0;
2310  wb_data, 2);
2311 
2312  return 0;
2313 }
2314 
/* Bring up BigMAC2 (E2 chips): clear MAC control, reset XGXS, program
 * the TX source MAC address, SAFC and the RX/TX/CNT MTUs, then apply
 * the current PFC configuration.  The udelay(30) between writes gives
 * the hardware settle time.  Returns 0.
 */
2315 static int bnx2x_bmac2_enable(struct link_params *params,
2316  struct link_vars *vars,
2317  u8 is_lb)
2318 {
2319  struct bnx2x *bp = params->bp;
2320  u8 port = params->port;
2321  u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2323  u32 wb_data[2];
2324 
2325  DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
2326 
 /* Clear MAC control before reconfiguring */
2327  wb_data[0] = 0;
2328  wb_data[1] = 0;
2329  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
2330  udelay(30);
2331 
2332  /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
2333  wb_data[0] = 0x3c;
2334  wb_data[1] = 0;
2336  wb_data, 2);
2337 
2338  udelay(30);
2339 
2340  /* TX MAC SA */
 /* MAC address packed big-endian into two 32-bit words */
2341  wb_data[0] = ((params->mac_addr[2] << 24) |
2342  (params->mac_addr[3] << 16) |
2343  (params->mac_addr[4] << 8) |
2344  params->mac_addr[5]);
2345  wb_data[1] = ((params->mac_addr[0] << 8) |
2346  params->mac_addr[1]);
2348  wb_data, 2);
2349 
2350  udelay(30);
2351 
2352  /* Configure SAFC */
2353  wb_data[0] = 0x1000200;
2354  wb_data[1] = 0;
2356  wb_data, 2);
2357  udelay(30);
2358 
2359  /* Set RX MTU */
2360  wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2361  wb_data[1] = 0;
2362  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
2363  udelay(30);
2364 
2365  /* Set TX MTU */
2366  wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2367  wb_data[1] = 0;
2368  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
2369  udelay(30);
2370  /* Set cnt max size */
 /* NOTE(review): CNT max is MTU-2 here, unlike BMAC1 - presumably
  * intentional for BMAC2; confirm against hardware documentation.
  */
2371  wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
2372  wb_data[1] = 0;
2373  REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
2374  udelay(30);
2375  bnx2x_update_pfc_bmac2(params, vars, is_lb);
2376 
2377  return 0;
2378 }
2379 
/* Enable the BigMAC for this port: optionally pulse it through reset,
 * enable register access, call the chip-appropriate enable routine
 * (BMAC2 on E2, else BMAC1), then steer the NIG muxes from EMAC to
 * BMAC.  Records MAC_TYPE_BMAC in vars and returns the enable rc.
 */
2380 static int bnx2x_bmac_enable(struct link_params *params,
2381  struct link_vars *vars,
2382  u8 is_lb, u8 reset_bmac)
2383 {
2384  int rc = 0;
2385  u8 port = params->port;
2386  struct bnx2x *bp = params->bp;
2387  u32 val;
2388  /* Reset and unreset the BigMac */
2389  if (reset_bmac) {
2392  usleep_range(1000, 2000);
2393  }
2394 
2397 
2398  /* Enable access for bmac registers */
2399  REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2400 
2401  /* Enable BMAC according to BMAC type*/
2402  if (CHIP_IS_E2(bp))
2403  rc = bnx2x_bmac2_enable(params, vars, is_lb);
2404  else
2405  rc = bnx2x_bmac1_enable(params, vars, is_lb);
 /* Route the datapath through the BMAC instead of the EMAC */
2406  REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2407  REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2408  REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
 /* Allow pause frames out only when TX flow control is active */
2409  val = 0;
2410  if ((params->feature_config_flags &
2412  (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
2413  val = 1;
2414  REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2415  REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2416  REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2417  REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2418  REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2419  REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2420 
2421  vars->mac_type = MAC_TYPE_BMAC;
2422  return rc;
2423 }
2424 
/* Set or clear the RX-enable bit in the BigMAC control register (BMAC2
 * offset on E2, BMAC1 otherwise).  The read-modify-write is performed
 * only if the BMAC is out of reset and NIG register access is enabled.
 */
2425 static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
2426 {
2427  u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2429  u32 wb_data[2];
2430  u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
2431 
2432  if (CHIP_IS_E2(bp))
2433  bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
2434  else
2435  bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
2436  /* Only if the bmac is out of reset */
2437  if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2439  nig_bmac_enable) {
2440  /* Clear Rx Enable bit in BMAC_CONTROL register */
2441  REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
2442  if (en)
2443  wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
2444  else
2445  wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2446  REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
 /* Give the MAC time to apply the new RX state */
2447  usleep_range(1000, 2000);
2448  }
2449 }
2450 
2451 static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
2452  u32 line_speed)
2453 {
2454  struct bnx2x *bp = params->bp;
2455  u8 port = params->port;
2456  u32 init_crd, crd;
2457  u32 count = 1000;
2458 
2459  /* Disable port */
2460  REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2461 
2462  /* Wait for init credit */
2463  init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2464  crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2465  DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2466 
2467  while ((init_crd != crd) && count) {
2468  usleep_range(5000, 10000);
2469  crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2470  count--;
2471  }
2472  crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2473  if (init_crd != crd) {
2474  DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
2475  init_crd, crd);
2476  return -EINVAL;
2477  }
2478 
2479  if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
2480  line_speed == SPEED_10 ||
2481  line_speed == SPEED_100 ||
2482  line_speed == SPEED_1000 ||
2483  line_speed == SPEED_2500) {
2484  REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
2485  /* Update threshold */
2486  REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2487  /* Update init credit */
2488  init_crd = 778; /* (800-18-4) */
2489 
2490  } else {
2491  u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
2492  ETH_OVREHEAD)/16;
2493  REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
2494  /* Update threshold */
2495  REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2496  /* Update init credit */
2497  switch (line_speed) {
2498  case SPEED_10000:
2499  init_crd = thresh + 553 - 22;
2500  break;
2501  default:
2502  DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
2503  line_speed);
2504  return -EINVAL;
2505  }
2506  }
2507  REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2508  DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2509  line_speed, init_crd);
2510 
2511  /* Probe the credit changes */
2512  REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2513  usleep_range(5000, 10000);
2514  REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2515 
2516  /* Enable port */
2517  REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2518  return 0;
2519 }
2520 
/* Resolve which EMAC GRC base (EMAC0/EMAC1) to use for MDC/MDIO access,
 * based on the configured access mode and, where relevant, the NIG port
 * swap setting.  Returns 0 for an unrecognized mode.
 * NOTE(review): the case labels for the access-mode constants appear to
 * have been dropped by text extraction - verify against the original.
 */
2536 static u32 bnx2x_get_emac_base(struct bnx2x *bp,
2537  u32 mdc_mdio_access, u8 port)
2538 {
2539  u32 emac_base = 0;
2540  switch (mdc_mdio_access) {
2542  break;
2544  if (REG_RD(bp, NIG_REG_PORT_SWAP))
2545  emac_base = GRCBASE_EMAC1;
2546  else
2547  emac_base = GRCBASE_EMAC0;
2548  break;
2550  if (REG_RD(bp, NIG_REG_PORT_SWAP))
2551  emac_base = GRCBASE_EMAC0;
2552  else
2553  emac_base = GRCBASE_EMAC1;
2554  break;
2556  emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2557  break;
2559  emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
2560  break;
2561  default:
2562  break;
2563  }
2564  return emac_base;
2565 
2566 }
2567 
2568 /******************************************************************/
2569 /* CL22 access functions */
2570 /******************************************************************/
/* Write a PHY register over MDIO using Clause 22 addressing: temporarily
 * drop the Clause-45 mode bit, issue the write command, busy-poll up to
 * 50 x 10us for completion, then restore the previous MDIO mode.
 * Returns 0 on success, -EFAULT on timeout.
 */
2571 static int bnx2x_cl22_write(struct bnx2x *bp,
2572  struct bnx2x_phy *phy,
2573  u16 reg, u16 val)
2574 {
2575  u32 tmp, mode;
2576  u8 i;
2577  int rc = 0;
2578  /* Switch to CL22 */
2579  mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
2581  mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2582 
2583  /* Address */
2584  tmp = ((phy->addr << 21) | (reg << 16) | val |
2587  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
2588 
 /* Busy-poll for command completion */
2589  for (i = 0; i < 50; i++) {
2590  udelay(10);
2591 
2592  tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2593  if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
2594  udelay(5);
2595  break;
2596  }
2597  }
2598  if (tmp & EMAC_MDIO_COMM_START_BUSY) {
2599  DP(NETIF_MSG_LINK, "write phy register failed\n");
2600  rc = -EFAULT;
2601  }
 /* Restore the saved MDIO mode (re-enables Clause 45 if it was set) */
2602  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
2603  return rc;
2604 }
2605 
/* Read a PHY register over MDIO using Clause 22 addressing: temporarily
 * drop the Clause-45 mode bit, issue the read command, busy-poll up to
 * 50 x 10us for the data, then restore the previous MDIO mode.
 * On success stores the value in *ret_val and returns 0; on timeout
 * stores 0 and returns -EFAULT.
 */
2606 static int bnx2x_cl22_read(struct bnx2x *bp,
2607  struct bnx2x_phy *phy,
2608  u16 reg, u16 *ret_val)
2609 {
2610  u32 val, mode;
2611  u16 i;
2612  int rc = 0;
2613 
2614  /* Switch to CL22 */
2615  mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
2617  mode & ~EMAC_MDIO_MODE_CLAUSE_45);
2618 
2619  /* Address */
2620  val = ((phy->addr << 21) | (reg << 16) |
2623  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
2624 
 /* Busy-poll for command completion, then extract the data field */
2625  for (i = 0; i < 50; i++) {
2626  udelay(10);
2627 
2628  val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2629  if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
2630  *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
2631  udelay(5);
2632  break;
2633  }
2634  }
2635  if (val & EMAC_MDIO_COMM_START_BUSY) {
2636  DP(NETIF_MSG_LINK, "read phy register failed\n");
2637 
2638  *ret_val = 0;
2639  rc = -EFAULT;
2640  }
 /* Restore the saved MDIO mode (re-enables Clause 45 if it was set) */
2641  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
2642  return rc;
2643 }
2644 
2645 /******************************************************************/
2646 /* CL45 access functions */
2647 /******************************************************************/
/* Read a PHY register over MDIO Clause 45: write the address cycle, poll
 * for completion, then issue the read cycle and poll for the data.  On
 * success stores the value in *ret_val and returns 0; on timeout stores
 * 0 and returns -EFAULT.  Includes E3 A0/B0 MDC/MDIO workarounds.
 */
2648 static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
2649  u8 devad, u16 reg, u16 *ret_val)
2650 {
2651  u32 val;
2652  u16 i;
2653  int rc = 0;
2654  if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2655  bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2657  /* Address */
2658  val = ((phy->addr << 21) | (devad << 16) | reg |
2661  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
2662 
 /* Busy-poll the address cycle */
2663  for (i = 0; i < 50; i++) {
2664  udelay(10);
2665 
2666  val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2667  if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
2668  udelay(5);
2669  break;
2670  }
2671  }
2672  if (val & EMAC_MDIO_COMM_START_BUSY) {
2673  DP(NETIF_MSG_LINK, "read phy register failed\n");
2674  netdev_err(bp->dev, "MDC/MDIO access timeout\n");
2675  *ret_val = 0;
2676  rc = -EFAULT;
2677  } else {
2678  /* Data */
2679  val = ((phy->addr << 21) | (devad << 16) |
2682  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
2683 
 /* Busy-poll the data cycle */
2684  for (i = 0; i < 50; i++) {
2685  udelay(10);
2686 
2687  val = REG_RD(bp, phy->mdio_ctrl +
2689  if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
2690  *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
2691  break;
2692  }
2693  }
2694  if (val & EMAC_MDIO_COMM_START_BUSY) {
2695  DP(NETIF_MSG_LINK, "read phy register failed\n");
2696  netdev_err(bp->dev, "MDC/MDIO access timeout\n");
2697  *ret_val = 0;
2698  rc = -EFAULT;
2699  }
2700  }
2701  /* Work around for E3 A0 */
 /* Issue a dummy read on every other access (recursive call; reg 0xf) */
2702  if (phy->flags & FLAGS_MDC_MDIO_WA) {
2703  phy->flags ^= FLAGS_DUMMY_READ;
2704  if (phy->flags & FLAGS_DUMMY_READ) {
2705  u16 temp_val;
2706  bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
2707  }
2708  }
2709 
2710  if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2711  bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2713  return rc;
2714 }
2715 
/* Write a PHY register over MDIO Clause 45: write the address cycle,
 * poll for completion, then issue the data write cycle and poll again.
 * Returns 0 on success, -EFAULT on timeout.  Includes the same E3
 * A0/B0 MDC/MDIO workarounds as bnx2x_cl45_read().
 */
2716 static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
2717  u8 devad, u16 reg, u16 val)
2718 {
2719  u32 tmp;
2720  u8 i;
2721  int rc = 0;
2722  if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2723  bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2725 
2726  /* Address */
2727  tmp = ((phy->addr << 21) | (devad << 16) | reg |
2730  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
2731 
 /* Busy-poll the address cycle */
2732  for (i = 0; i < 50; i++) {
2733  udelay(10);
2734 
2735  tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
2736  if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
2737  udelay(5);
2738  break;
2739  }
2740  }
2741  if (tmp & EMAC_MDIO_COMM_START_BUSY) {
2742  DP(NETIF_MSG_LINK, "write phy register failed\n");
2743  netdev_err(bp->dev, "MDC/MDIO access timeout\n");
2744  rc = -EFAULT;
2745  } else {
2746  /* Data */
2747  tmp = ((phy->addr << 21) | (devad << 16) | val |
2750  REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
2751 
 /* Busy-poll the data cycle */
2752  for (i = 0; i < 50; i++) {
2753  udelay(10);
2754 
2755  tmp = REG_RD(bp, phy->mdio_ctrl +
2757  if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
2758  udelay(5);
2759  break;
2760  }
2761  }
2762  if (tmp & EMAC_MDIO_COMM_START_BUSY) {
2763  DP(NETIF_MSG_LINK, "write phy register failed\n");
2764  netdev_err(bp->dev, "MDC/MDIO access timeout\n");
2765  rc = -EFAULT;
2766  }
2767  }
2768  /* Work around for E3 A0 */
 /* Issue a dummy read on every other access (reg 0xf) */
2769  if (phy->flags & FLAGS_MDC_MDIO_WA) {
2770  phy->flags ^= FLAGS_DUMMY_READ;
2771  if (phy->flags & FLAGS_DUMMY_READ) {
2772  u16 temp_val;
2773  bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
2774  }
2775  }
2776  if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2777  bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2779  return rc;
2780 }
2781 
2782 /******************************************************************/
2783 /* EEE section */
2784 /******************************************************************/
2785 static u8 bnx2x_eee_has_cap(struct link_params *params)
2786 {
2787  struct bnx2x *bp = params->bp;
2788 
2789  if (REG_RD(bp, params->shmem2_base) <=
2790  offsetof(struct shmem2_region, eee_status[params->port]))
2791  return 0;
2792 
2793  return 1;
2794 }
2795 
/* Translate an NVRAM EEE mode selector into an idle-timer value
 * (balanced / aggressive / latency presets; 0 for unknown modes).
 * Always returns 0.
 * NOTE(review): the case labels for the NVRAM mode constants appear to
 * have been dropped by text extraction - verify against the original.
 */
2796 static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
2797 {
2798  switch (nvram_mode) {
2800  *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
2801  break;
2803  *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
2804  break;
2806  *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
2807  break;
2808  default:
2809  *idle_timer = 0;
2810  break;
2811  }
2812 
2813  return 0;
2814 }
2815 
/* Inverse of bnx2x_eee_nvram_to_time(): map an idle-timer value back to
 * its NVRAM mode selector.  Always returns 0.
 * NOTE(review): the case labels and *nvram_mode assignments appear to
 * have been dropped by text extraction - verify against the original.
 */
2816 static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
2817 {
2818  switch (idle_timer) {
2821  break;
2824  break;
2827  break;
2828  default:
2830  break;
2831  }
2832 
2833  return 0;
2834 }
2835 
/* Compute the EEE idle-timer value: either taken directly from
 * params->eee_mode, translated from its HSI selector, or derived from
 * the NVRAM-resident per-port power mode.  Returns 0 on translation
 * failure.
 */
2836 static u32 bnx2x_eee_calc_timer(struct link_params *params)
2837 {
2838  u32 eee_mode, eee_idle;
2839  struct bnx2x *bp = params->bp;
2840 
2841  if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
2842  if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
2843  /* time value in eee_mode --> used directly*/
2844  eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
2845  } else {
2846  /* hsi value in eee_mode --> time */
2847  if (bnx2x_eee_nvram_to_time(params->eee_mode &
2849  &eee_idle))
2850  return 0;
2851  }
2852  } else {
2853  /* hsi values in nvram --> time*/
 /* Read the per-port EEE power mode from shared memory */
2854  eee_mode = ((REG_RD(bp, params->shmem_base +
2855  offsetof(struct shmem_region, dev_info.
2856  port_feature_config[params->port].
2857  eee_power_mode)) &
2860 
2861  if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
2862  return 0;
2863  }
2864 
2865  return eee_idle;
2866 }
2867 
/* Program the CPMU LPI idle-timer threshold for this port and mirror
 * the timer (or its NVRAM selector) into vars->eee_status.  Returns 0
 * on success, -EINVAL if LPI is requested with a zero timer or the
 * timer cannot be mapped back to an NVRAM mode.
 */
2868 static int bnx2x_eee_set_timers(struct link_params *params,
2869  struct link_vars *vars)
2870 {
2871  u32 eee_idle = 0, eee_mode;
2872  struct bnx2x *bp = params->bp;
2873 
2874  eee_idle = bnx2x_eee_calc_timer(params);
2875 
2876  if (eee_idle) {
2877  REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
2878  eee_idle);
2879  } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
2880  (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
2881  (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
2882  DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
2883  return -EINVAL;
2884  }
2885 
2887  if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
2888  /* eee_idle in 1u --> eee_status in 16u */
2889  eee_idle >>= 4;
2890  vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
2892  } else {
2893  if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
2894  return -EINVAL;
2895  vars->eee_status |= eee_mode;
2896  }
2897 
2898  return 0;
2899 }
2900 
/* Record the supported EEE modes and the LPI request/advertise bits
 * from params->eee_mode into vars->eee_status, then program the EEE
 * timers.  Returns the result of bnx2x_eee_set_timers().
 */
2901 static int bnx2x_eee_initial_config(struct link_params *params,
2902  struct link_vars *vars, u8 mode)
2903 {
2904  vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
2905 
2906  /* Propagate params' bits --> vars (for migration exposure) */
2907  if (params->eee_mode & EEE_MODE_ENABLE_LPI)
2909  else
2910  vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
2911 
2912  if (params->eee_mode & EEE_MODE_ADV_LPI)
2914  else
2916 
2917  return bnx2x_eee_set_timers(params, vars);
2918 }
2919 
/* Turn EEE off for this port: disable LPI generation in the CPMU and
 * clear the PHY's EEE advertisement register.  Always returns 0.
 */
2920 static int bnx2x_eee_disable(struct bnx2x_phy *phy,
2921  struct link_params *params,
2922  struct link_vars *vars)
2923 {
2924  struct bnx2x *bp = params->bp;
2925 
2926  /* Make Certain LPI is disabled */
2927  REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
2928 
 /* Stop advertising any EEE capability to the link partner */
2929  bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
2930 
2932 
2933  return 0;
2934 }
2935 
/* Advertise the requested EEE modes (10G and/or 1G) to the link partner
 * via the PHY's EEE advertisement register, after unmasking LPI
 * generation in the CPMU, and record the advertised modes in
 * vars->eee_status.  Always returns 0.
 */
2936 static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
2937  struct link_params *params,
2938  struct link_vars *vars, u8 modes)
2939 {
2940  struct bnx2x *bp = params->bp;
2941  u16 val = 0;
2942 
2943  /* Mask events preventing LPI generation */
2944  REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
2945 
 /* Build the IEEE EEE advertisement bits: 0x8 = 10GBase-T, 0x4 = 1000Base-T */
2946  if (modes & SHMEM_EEE_10G_ADV) {
2947  DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
2948  val |= 0x8;
2949  }
2950  if (modes & SHMEM_EEE_1G_ADV) {
2951  DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
2952  val |= 0x4;
2953  }
2954 
2955  bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
2956 
2958  vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
2959 
2960  return 0;
2961 }
2962 
2963 static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2964 {
2965  struct bnx2x *bp = params->bp;
2966 
2967  if (bnx2x_eee_has_cap(params))
2968  REG_WR(bp, params->shmem2_base +
2969  offsetof(struct shmem2_region,
2970  eee_status[params->port]), eee_status);
2971 }
2972 
/* Resolve the EEE auto-negotiation result: read the local and link
 * partner EEE advertisement registers, record the partner's modes in
 * vars->eee_status, and mark EEE active when both sides advertise the
 * mode matching the current line speed.
 */
2973 static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
2974  struct link_params *params,
2975  struct link_vars *vars)
2976 {
2977  struct bnx2x *bp = params->bp;
2978  u16 adv = 0, lp = 0;
2979  u32 lp_adv = 0;
2980  u8 neg = 0;
2981 
2982  bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
2983  bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
2984 
 /* Bit groups: 0x2 = 100M, 0x14 = 1G variants, 0x68 = 10G variants */
2985  if (lp & 0x2) {
2986  lp_adv |= SHMEM_EEE_100M_ADV;
2987  if (adv & 0x2) {
2988  if (vars->line_speed == SPEED_100)
2989  neg = 1;
2990  DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
2991  }
2992  }
2993  if (lp & 0x14) {
2994  lp_adv |= SHMEM_EEE_1G_ADV;
2995  if (adv & 0x14) {
2996  if (vars->line_speed == SPEED_1000)
2997  neg = 1;
2998  DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
2999  }
3000  }
3001  if (lp & 0x68) {
3002  lp_adv |= SHMEM_EEE_10G_ADV;
3003  if (adv & 0x68) {
3004  if (vars->line_speed == SPEED_10000)
3005  neg = 1;
3006  DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
3007  }
3008  }
3009 
3011  vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
3012 
3013  if (neg) {
3014  DP(NETIF_MSG_LINK, "EEE is active\n");
3016  }
3017 
3018 }
3019 
3020 /******************************************************************/
3021 /* BSC access functions from E3 */
3022 /******************************************************************/
/* Drive the I2C mux GPIO pins so the BSC engine talks to this port's
 * SFP module: pin numbers come from the board config in shmem, output
 * values from the per-port e3_cmn_pin_cfg.
 */
3023 static void bnx2x_bsc_module_sel(struct link_params *params)
3024 {
3025  int idx;
3026  u32 board_cfg, sfp_ctrl;
3027  u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
3028  struct bnx2x *bp = params->bp;
3029  u8 port = params->port;
3030  /* Read I2C output PINs */
3031  board_cfg = REG_RD(bp, params->shmem_base +
3032  offsetof(struct shmem_region,
3033  dev_info.shared_hw_config.board));
3034  i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
3035  i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
3037 
3038  /* Read I2C output value */
3039  sfp_ctrl = REG_RD(bp, params->shmem_base +
3040  offsetof(struct shmem_region,
3041  dev_info.port_hw_config[port].e3_cmn_pin_cfg));
3042  i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
3043  i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
3044  DP(NETIF_MSG_LINK, "Setting BSC switch\n");
 /* Apply both mux pin values */
3045  for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
3046  bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
3047 }
3048 
/* Read up to 16 bytes from an SFP module EEPROM over the E3 BSC (I2C)
 * engine.  Only slave IDs 0xa0/0xa2 are accepted.  A zero-byte write
 * first sets the module's address pointer, then a read transfer fills
 * the IMC data registers, which are copied (byte-swapped on big-endian
 * hosts) into data_array.  Returns 0 on success, -EINVAL on bad
 * arguments, -EFAULT on engine timeout.
 */
3049 static int bnx2x_bsc_read(struct link_params *params,
3050  struct bnx2x_phy *phy,
3051  u8 sl_devid,
3052  u16 sl_addr,
3053  u8 lc_addr,
3054  u8 xfer_cnt,
3055  u32 *data_array)
3056 {
3057  u32 val, i;
3058  int rc = 0;
3059  struct bnx2x *bp = params->bp;
3060 
3061  if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
3062  DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
3063  return -EINVAL;
3064  }
3065 
3066  if (xfer_cnt > 16) {
3067  DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
3068  xfer_cnt);
3069  return -EINVAL;
3070  }
3071  bnx2x_bsc_module_sel(params);
3072 
 /* NOTE(review): caller-supplied xfer_cnt is overwritten here -
  * presumably to fill the IMC buffer from lc_addr; confirm intent.
  */
3073  xfer_cnt = 16 - lc_addr;
3074 
3075  /* Enable the engine */
3076  val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3077  val |= MCPR_IMC_COMMAND_ENABLE;
3078  REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3079 
3080  /* Program slave device ID */
3081  val = (sl_devid << 16) | sl_addr;
3083 
3084  /* Start xfer with 0 byte to update the address pointer ???*/
3085  val = (MCPR_IMC_COMMAND_ENABLE) |
3088  (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
3089  REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3090 
3091  /* Poll for completion */
3092  i = 0;
3093  val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
 /* IMC status field == 1 means transfer done */
3094  while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
3095  udelay(10);
3096  val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3097  if (i++ > 1000) {
3098  DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
3099  i);
3100  rc = -EFAULT;
3101  break;
3102  }
3103  }
3104  if (rc == -EFAULT)
3105  return rc;
3106 
3107  /* Start xfer with read op */
3108  val = (MCPR_IMC_COMMAND_ENABLE) |
3112  (xfer_cnt);
3113  REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
3114 
3115  /* Poll for completion */
3116  i = 0;
3117  val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3118  while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
3119  udelay(10);
3120  val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
3121  if (i++ > 1000) {
3122  DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
3123  rc = -EFAULT;
3124  break;
3125  }
3126  }
3127  if (rc == -EFAULT)
3128  return rc;
3129 
 /* Copy out the IMC data words, byte-swapping on big-endian hosts */
3130  for (i = (lc_addr >> 2); i < 4; i++) {
3131  data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
3132 #ifdef __BIG_ENDIAN
3133  data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
3134  ((data_array[i] & 0x0000ff00) << 8) |
3135  ((data_array[i] & 0x00ff0000) >> 8) |
3136  ((data_array[i] & 0xff000000) >> 24);
3137 #endif
3138  }
3139  return rc;
3140 }
3141 
3142 static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3143  u8 devad, u16 reg, u16 or_val)
3144 {
3145  u16 val;
3146  bnx2x_cl45_read(bp, phy, devad, reg, &val);
3147  bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
3148 }
3149 
3150 int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
3151  u8 devad, u16 reg, u16 *ret_val)
3152 {
3153  u8 phy_index;
3154  /* Probe for the phy according to the given phy_addr, and execute
3155  * the read request on it
3156  */
3157  for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
3158  if (params->phy[phy_index].addr == phy_addr) {
3159  return bnx2x_cl45_read(params->bp,
3160  &params->phy[phy_index], devad,
3161  reg, ret_val);
3162  }
3163  }
3164  return -EINVAL;
3165 }
3166 
3167 int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
3168  u8 devad, u16 reg, u16 val)
3169 {
3170  u8 phy_index;
3171  /* Probe for the phy according to the given phy_addr, and execute
3172  * the write request on it
3173  */
3174  for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
3175  if (params->phy[phy_index].addr == phy_addr) {
3176  return bnx2x_cl45_write(params->bp,
3177  &params->phy[phy_index], devad,
3178  reg, val);
3179  }
3180  }
3181  return -EINVAL;
3182 }
/* Compute the Warpcore serdes lane for this port, accounting for the
 * path-swap (and, in 4-port mode, port-swap) strap/override registers.
 * 4-port mode: lane = (port << 1) + path; 2-port mode: lane = path << 1.
 * NOTE(review): the two-port-mode swap register names appear to have
 * been dropped by text extraction - verify against the original.
 */
3183 static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3184  struct link_params *params)
3185 {
3186  u8 lane = 0;
3187  struct bnx2x *bp = params->bp;
3188  u32 path_swap, path_swap_ovr;
3189  u8 path, port;
3190 
3191  path = BP_PATH(bp);
3192  port = params->port;
3193 
3194  if (bnx2x_is_4_port_mode(bp)) {
3195  u32 port_swap, port_swap_ovr;
3196 
3197  /* Figure out path swap value */
 /* Override register bit 0 = override valid, bit 1 = swap value */
3198  path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
3199  if (path_swap_ovr & 0x1)
3200  path_swap = (path_swap_ovr & 0x2);
3201  else
3202  path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
3203 
3204  if (path_swap)
3205  path = path ^ 1;
3206 
3207  /* Figure out port swap value */
3208  port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
3209  if (port_swap_ovr & 0x1)
3210  port_swap = (port_swap_ovr & 0x2);
3211  else
3212  port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
3213 
3214  if (port_swap)
3215  port = port ^ 1;
3216 
3217  lane = (port<<1) + path;
3218  } else { /* Two port mode - no port swap */
3219 
3220  /* Figure out path swap value */
3221  path_swap_ovr =
3223  if (path_swap_ovr & 0x1) {
3224  path_swap = (path_swap_ovr & 0x2);
3225  } else {
3226  path_swap =
3228  }
3229  if (path_swap)
3230  path = path ^ 1;
3231 
3232  lane = path << 1 ;
3233  }
3234  return lane;
3235 }
3236 
/* Program the AER (Address Expansion Register) MMD so subsequent MDIO
 * accesses target the correct serdes lane: Warpcore lane (with the
 * dual-lane broadcast encoding) on E3, otherwise 0x3800 + lane offset
 * (minus one on E2).
 */
3237 static void bnx2x_set_aer_mmd(struct link_params *params,
3238  struct bnx2x_phy *phy)
3239 {
3240  u32 ser_lane;
3241  u16 offset, aer_val;
3242  struct bnx2x *bp = params->bp;
3243  ser_lane = ((params->lane_config &
3246 
 /* Direct-attached XGXS uses addr + lane as the offset; ext phys use 0 */
3247  offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
3248  (phy->addr + ser_lane) : 0;
3249 
3250  if (USES_WARPCORE(bp)) {
3251  aer_val = bnx2x_get_warpcore_lane(phy, params);
3252  /* In Dual-lane mode, two lanes are joined together,
3253  * so in order to configure them, the AER broadcast method is
3254  * used here.
3255  * 0x200 is the broadcast address for lanes 0,1
3256  * 0x201 is the broadcast address for lanes 2,3
3257  */
3258  if (phy->flags & FLAGS_WC_DUAL_MODE)
3259  aer_val = (aer_val >> 1) | 0x200;
3260  } else if (CHIP_IS_E2(bp))
3261  aer_val = 0x3800 + offset - 1;
3262  else
3263  aer_val = 0x3800 + offset;
3264 
3266  MDIO_AER_BLOCK_AER_REG, aer_val);
3267 
3268 }
3269 
3270 /******************************************************************/
3271 /* Internal phy section */
3272 /******************************************************************/
3273 
3274 static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
3275 {
3276  u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3277 
3278  /* Set Clause 22 */
3279  REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
3280  REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
3281  udelay(500);
3282  REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
3283  udelay(500);
3284  /* Set Clause 45 */
3285  REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
3286 }
3287 
/* Pulse the SerDes reset for @port and restore default MDIO access.
 * NOTE(review): the reset set/clear REG_WR calls and the last write's
 * value argument were truncated in this extract of the file.
 */
static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
{
 u32 val;

 DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");

 /* Per-port reset bits occupy a 16-bit field per port */
 val = SERDES_RESET_BITS << (port*16);

 /* Reset and unreset the SerDes/XGXS */
 udelay(500);

 bnx2x_set_serdes_access(bp, port);

 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
}
3306 
3307 static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
3308  struct link_params *params,
3309  u32 action)
3310 {
3311  struct bnx2x *bp = params->bp;
3312  switch (action) {
3313  case PHY_INIT:
3314  /* Set correct devad */
3315  REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
3316  REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
3317  phy->def_md_devad);
3318  break;
3319  }
3320 }
3321 
/* Take the XGXS out of reset for the current port and re-apply the
 * default MDIO devad via the PHY_INIT specific function.
 * NOTE(review): the reset set/clear REG_WR calls were truncated in this
 * extract of the file.
 */
static void bnx2x_xgxs_deassert(struct link_params *params)
{
 struct bnx2x *bp = params->bp;
 u8 port;
 u32 val;
 DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
 port = params->port;

 /* Per-port reset bits occupy a 16-bit field per port */
 val = XGXS_RESET_BITS << (port*16);

 /* Reset and unreset the SerDes/XGXS */
 udelay(500);
 bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
 PHY_INIT);
}
3339 
/* Compute the IEEE pause advertisement bits (*ieee_fc) from the PHY's
 * requested flow-control mode, per Table 28B-3 of 802.3ab-1999.
 * NOTE(review): the *ieee_fc assignment lines inside the switch were
 * truncated in this extract of the file.
 */
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
 struct link_params *params, u16 *ieee_fc)
{
 struct bnx2x *bp = params->bp;
 /* Resolve pause mode and advertisement Please refer to Table
  * 28B-3 of the 802.3ab-1999 spec
  */

 switch (phy->req_flow_ctrl) {
 case BNX2X_FLOW_CTRL_AUTO:
 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
 else
 *ieee_fc |=
 break;

 case BNX2X_FLOW_CTRL_TX:
 break;

 case BNX2X_FLOW_CTRL_RX:
 case BNX2X_FLOW_CTRL_BOTH:
 break;

 case BNX2X_FLOW_CTRL_NONE:
 default:
 break;
 }
 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
}
3374 
/* Copy the per-link-config requested settings (flow control, line
 * speed, speed capability mask, duplex) from @params into each PHY
 * entry, honouring the multi-PHY swap configuration.
 * NOTE(review): the phy_config_swapped mask constant and the
 * SPEED_AUTO_NEG handling lines were truncated in this extract.
 */
static void set_phy_vars(struct link_params *params,
 struct link_vars *vars)
{
 struct bnx2x *bp = params->bp;
 u8 actual_phy_idx, phy_index, link_cfg_idx;
 u8 phy_config_swapped = params->multi_phy_config &
 for (phy_index = INT_PHY; phy_index < params->num_phys;
 phy_index++) {
 link_cfg_idx = LINK_CONFIG_IDX(phy_index);
 actual_phy_idx = phy_index;
 /* When PHYs are swapped, EXT_PHY1/EXT_PHY2 trade places */
 if (phy_config_swapped) {
 if (phy_index == EXT_PHY1)
 actual_phy_idx = EXT_PHY2;
 else if (phy_index == EXT_PHY2)
 actual_phy_idx = EXT_PHY1;
 }
 params->phy[actual_phy_idx].req_flow_ctrl =
 params->req_flow_ctrl[link_cfg_idx];

 params->phy[actual_phy_idx].req_line_speed =
 params->req_line_speed[link_cfg_idx];

 params->phy[actual_phy_idx].speed_cap_mask =
 params->speed_cap_mask[link_cfg_idx];

 params->phy[actual_phy_idx].req_duplex =
 params->req_duplex[link_cfg_idx];

 if (params->req_line_speed[link_cfg_idx] ==

 DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
 " speed_cap_mask %x\n",
 params->phy[actual_phy_idx].req_flow_ctrl,
 params->phy[actual_phy_idx].req_line_speed,
 params->phy[actual_phy_idx].speed_cap_mask);
 }
}
3415 
/* Read-modify-write the external PHY's AN pause advertisement register
 * based on the IEEE flow-control bits resolved by
 * bnx2x_calc_ieee_aneg_adv().
 * NOTE(review): the mask/set lines inside the two if-blocks were
 * truncated in this extract of the file.
 */
static void bnx2x_ext_phy_set_pause(struct link_params *params,
 struct bnx2x_phy *phy,
 struct link_vars *vars)
{
 u16 val;
 struct bnx2x *bp = params->bp;
 /* Read modify write pause advertizing */
 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);


 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
 if ((vars->ieee_fc &
 }
 if ((vars->ieee_fc &
 }
 DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
}
3442 
/* Resolve the negotiated flow-control mode from the 4-bit pause nibble
 * (local ASYM/PAUSE in bits 3:2, link partner ASYM/PAUSE in bits 1:0)
 * per the 802.3 pause resolution table.
 * NOTE(review): the assignments for the symmetric case and the
 * link-status update lines were truncated in this extract.
 */
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
{ /* LD LP */
 switch (pause_result) { /* ASYM P ASYM P */
 case 0xb: /* 1 0 1 1 */
 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
 break;

 case 0xe: /* 1 1 1 0 */
 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
 break;

 case 0x5: /* 0 1 0 1 */
 case 0x7: /* 0 1 1 1 */
 case 0xd: /* 1 1 0 1 */
 case 0xf: /* 1 1 1 1 */
 break;

 default:
 break;
 }
 if (pause_result & (1<<0))
 if (pause_result & (1<<1))

}
3470 
/* Read local and link-partner pause advertisements — via Clause 22,
 * Warpcore GP status (E3 direct media), or Clause 45 — build the
 * combined pause nibble and feed it to bnx2x_pause_resolve().
 * NOTE(review): the leading condition, gp_mask setup, and the final
 * mask/shift lines were truncated in this extract of the file.
 */
static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
 struct link_params *params,
 struct link_vars *vars)
{
 u16 ld_pause; /* local */
 u16 lp_pause; /* link partner */
 u16 pause_result;
 struct bnx2x *bp = params->bp;
 bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
 bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
 } else if (CHIP_IS_E3(bp) &&
 SINGLE_MEDIA_DIRECT(params)) {
 u8 lane = bnx2x_get_warpcore_lane(phy, params);
 u16 gp_status, gp_mask;
 bnx2x_cl45_read(bp, phy,
 &gp_status);
 lane;
 /* CL73 AN complete: use the CL73 advertisement registers */
 if ((gp_status & gp_mask) == gp_mask) {
 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
 } else {
 /* Otherwise fall back to the CL37 flow-control regs */
 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
 ld_pause = ((ld_pause &
 << 3);
 lp_pause = ((lp_pause &
 << 3);
 }
 } else {
 bnx2x_cl45_read(bp, phy,
 MDIO_AN_DEVAD,
 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
 bnx2x_cl45_read(bp, phy,
 MDIO_AN_DEVAD,
 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
 }
 pause_result = (ld_pause &
 pause_result |= (lp_pause &
 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
 bnx2x_pause_resolve(vars, pause_result);

}
3525 
/* Resolve flow control for an external PHY.  Forced flow control wins;
 * forced speed uses the auto-adv default; otherwise autoneg resolution
 * runs and 1 is returned to indicate it did.
 * NOTE(review): the condition opening the final branch was truncated
 * in this extract of the file.
 */
static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
 struct link_params *params,
 struct link_vars *vars)
{
 u8 ret = 0;
 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
 /* Update the advertised flow-controled of LD/LP in AN */
 if (phy->req_line_speed == SPEED_AUTO_NEG)
 bnx2x_ext_phy_update_adv_fc(phy, params, vars);
 /* But set the flow-control result as the requested one */
 vars->flow_ctrl = phy->req_flow_ctrl;
 } else if (phy->req_line_speed != SPEED_AUTO_NEG)
 vars->flow_ctrl = params->req_fc_auto_adv;
 ret = 1;
 bnx2x_ext_phy_update_adv_fc(phy, params, vars);
 }
 return ret;
}
3546 /******************************************************************/
3547 /* Warpcore section */
3548 /******************************************************************/
3549 /* The init_internal_warpcore should mirror the xgxs,
3550  * i.e. reset the lane (if needed), set aer for the
3551  * init configuration, and set/clear SGMII flag. Internal
3552  * phy init is done purely in phy_init stage.
3553  */
3554 
/* Configure the Warpcore to pass EEE LPI signalling through instead of
 * terminating it.
 * NOTE(review): the first write's register/value arguments were
 * truncated in this extract of the file.
 */
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
 struct link_params *params)
{
 struct bnx2x *bp = params->bp;

 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
}
3566 
/* Enable clause-73 auto-negotiation for KR mode on the Warpcore:
 * program defaults, CL72 control, build the 1G-KX/10G-KR advertisement
 * word, set TX PMD drivers, FEC and pause advertisement, apply the AN
 * work-around for microcode older than D108, then restart autoneg.
 * NOTE(review): many register names / values in this function were
 * truncated in this extract of the file.
 */
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 struct link_params *params,
 struct link_vars *vars) {
 u16 lane, i, cl72_ctrl, an_adv = 0;
 u16 ucode_ver;
 struct bnx2x *bp = params->bp;
 static struct bnx2x_reg_set reg_set[] = {
 /* Disable Autoneg: re-enable it after adv is done. */
 };
 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
 /* Set to default registers that may be overriden by 10G force */
 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 reg_set[i].val);

 /* Read-modify-write the CL72 control register */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 cl72_ctrl &= 0xf8ff;
 cl72_ctrl |= 0x3800;
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Check adding advertisement for 1G KX */
 if (((vars->line_speed == SPEED_AUTO_NEG) &&
 (vars->line_speed == SPEED_1000)) {
 an_adv |= (1<<5);

 /* Enable CL37 1G Parallel Detect */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
 DP(NETIF_MSG_LINK, "Advertize 1G\n");
 }
 if (((vars->line_speed == SPEED_AUTO_NEG) &&
 (vars->line_speed == SPEED_10000)) {
 /* Check adding advertisement for 10G KR */
 an_adv |= (1<<7);
 /* Enable 10G Parallel Detect */

 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 bnx2x_set_aer_mmd(params, phy);
 DP(NETIF_MSG_LINK, "Advertize 10G\n");
 }

 /* Set Transmit PMD settings */
 lane = bnx2x_get_warpcore_lane(phy, params);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 0x03f0);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 0x03f0);

 /* Advertised speeds */
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,

 /* Advertised and set FEC (Forward Error Correction) */
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,

 /* Enable CL37 BAM */
 if (REG_RD(bp, params->shmem_base +
 offsetof(struct shmem_region, dev_info.
 port_hw_config[params->port].default_cfg)) &
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 1);
 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
 }

 /* Advertise pause */
 bnx2x_ext_phy_set_pause(params, phy, vars);
 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
  */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
 if (ucode_ver < 0xd108) {
 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
 ucode_ver);
 }
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,

 /* Over 1G - AN local device user page 1 */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL3_UP1, 0x1f);

 /* Enable Autoneg */
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,

}
3678 
/* Force the Warpcore into 10G KR mode: disable autoneg (keeping CL72
 * training on), disable CL36 PCS TX for this lane, set the speed via
 * PMA/PMD, configure the 64/66 scramblers and bounce the loopback bit
 * to reset the 64/66 decoder.
 * NOTE(review): several register names / values were truncated in this
 * extract of the file.
 */
static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 struct link_params *params,
 struct link_vars *vars)
{
 struct bnx2x *bp = params->bp;
 u16 val16, i, lane;
 static struct bnx2x_reg_set reg_set[] = {
 /* Disable Autoneg */
 0x3f00},
 /* Leave cl72 training enable, needed for KR */
 {MDIO_PMA_DEVAD,
 0x2}
 };

 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 reg_set[i].val);

 lane = bnx2x_get_warpcore_lane(phy, params);
 /* Global registers */
 /* Disable CL36 PCS Tx */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 val16 &= ~(0x0011 << lane);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 val16 |= (0x0303 << (lane << 1));
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 /* Restore AER */
 bnx2x_set_aer_mmd(params, phy);
 /* Set speed via PMA/PMD register */
 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,

 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,

 /* Enable encoded forced speed */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Turn TX scramble payload only the 64/66 scrambler */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Turn RX scramble payload only the 64/66 scrambler */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_CONTROL, 0xF9);

 /* Set and clear loopback to cause a reset to 64/66 decoder */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

}
3748 
/* Configure the Warpcore for forced 10G XFI (@is_xfi != 0) or SFI
 * mode: hold rx/tx resets, disable CL73 AN and 100FX logic, select the
 * XFI/SFI rate in MISC1, program per-mode FIR tap and TX driver values,
 * then release the resets.
 * NOTE(review): several register names / values were truncated in this
 * extract of the file.
 */
static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 struct link_params *params,
 u8 is_xfi)
{
 struct bnx2x *bp = params->bp;
 u16 misc1_val, tap_val, tx_driver_val, lane, val;
 /* Hold rxSeqStart */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,

 /* Hold tx_fifo_reset */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,

 /* Disable CL73 AN */
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);

 /* Disable 100FX Enable and Auto-Detect */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_FX100_CTRL1, &val);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));

 /* Disable 100FX Idle detect */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_FX100_CTRL3, 0x0080);

 /* Set Block address to Remote PHY & Clear forced_speed[5] */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));

 /* Turn off auto-detect & fiber mode */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 (val & 0xFFEE));

 /* Set filter_force_link, disable_false_link and parallel_detect */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 ((val | 0x0006) & 0xFFFE));

 /* Set XFI / SFI */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);

 /* Low 5 bits select the lane rate; cleared before mode select */
 misc1_val &= ~(0x1f);

 if (is_xfi) {
 misc1_val |= 0x5;
 tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
 tx_driver_val =

 } else {
 misc1_val |= 0x9;
 tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
 tx_driver_val =
 }
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);

 /* Set Transmit PMD settings */
 lane = bnx2x_get_warpcore_lane(phy, params);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
 tx_driver_val);

 /* Enable fiber mode, enable and invert sig_det */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,

 /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);

 bnx2x_warpcore_set_lpi_passthrough(phy, params);

 /* 10G XFI Full Duplex */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Release tx_fifo_reset */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Release rxSeqStart */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
}
3860 
/* Placeholder: 20G KR2 configuration is not implemented on this
 * hardware/firmware generation; only logs that the mode is unsupported.
 */
static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
 struct bnx2x_phy *phy)
{
 DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n");
}
3866 
/* Configure the Warpcore for 20G DXGXS (dual-lane) operation: program
 * the RX analog controls, the RX66 sync code words and their masks,
 * the serdes digital MISC registers, and the TX PMD settings for the
 * given @lane.
 * NOTE(review): several register names / values were truncated in this
 * extract of the file.
 */
static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
 struct bnx2x_phy *phy,
 u16 lane)
{
 /* Rx0 anaRxControl1G */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Rx2 anaRxControl1G */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* RX66 sync code words (SCW0-3) and their bit masks */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW0, 0xE070);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW1, 0xC0D0);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW2, 0xA0B0);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW3, 0x8090);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);

 /* Serdes Digital Misc1 */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 /* Serdes Digital4 Misc3 */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);

 /* Set Transmit PMD settings */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
}
3924 
/* Configure the Warpcore for SGMII (10/100/1000) or 1G fiber mode.
 * Either enables SGMII autoneg (@always_autoneg or requested AN), or
 * forces the requested 10/100/1000 speed and duplex; then sets fiber
 * vs. SGMII slave mode, bounces parallel detect, and enables autodet.
 * NOTE(review): several register-name argument lines were truncated in
 * this extract of the file.
 */
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
 struct link_params *params,
 u8 fiber_mode,
 u8 always_autoneg)
{
 struct bnx2x *bp = params->bp;
 u16 val16, digctrl_kx1, digctrl_kx2;

 /* Clear XFI clock comp in non-10G single lane mode. */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_CONTROL, &val16);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));

 bnx2x_warpcore_set_lpi_passthrough(phy, params);

 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
 /* SGMII Autoneg */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 val16 | 0x1000);
 DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
 } else {
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 /* Clear AN-enable and the speed-select bits before forcing */
 val16 &= 0xcebf;
 switch (phy->req_line_speed) {
 case SPEED_10:
 break;
 case SPEED_100:
 val16 |= 0x2000;
 break;
 case SPEED_1000:
 val16 |= 0x0040;
 break;
 default:
 "Speed not supported: 0x%x\n", phy->req_line_speed);
 return;
 }

 if (phy->req_duplex == DUPLEX_FULL)
 val16 |= 0x0100;

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
 phy->req_line_speed);
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
 }

 /* SGMII Slave mode and disable signal detect */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 if (fiber_mode)
 digctrl_kx1 = 1;
 else
 digctrl_kx1 &= 0xff4a;

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 digctrl_kx1);

 /* Turn off parallel detect */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 (digctrl_kx2 & ~(1<<2)));

 /* Re-enable parallel detect */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 (digctrl_kx2 | (1<<2)));

 /* Enable autodet */
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 (digctrl_kx1 | 0x10));
}
4010 
/* Assert (@reset != 0) or deassert the Warpcore lane reset — the top
 * two bits of the lane control register — then read back to flush.
 * NOTE(review): the register-name arguments of the three accesses were
 * truncated in this extract of the file.
 */
static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
 struct bnx2x_phy *phy,
 u8 reset)
{
 u16 val;
 /* Take lane out of reset after configuration is finished */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 if (reset)
 val |= 0xC000;
 else
 val &= 0x3FFF;
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
}
4028 /* Clear SFI/XFI link settings registers */
/* Clear SFI/XFI link settings registers back to defaults: restore XFI
 * clock compensation, replay the default register table, and reset the
 * TX driver value for the PHY's lane.
 * NOTE(review): most entries of the wc_regs table were truncated in
 * this extract of the file.
 */
static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
 struct link_params *params,
 u16 lane)
{
 struct bnx2x *bp = params->bp;
 u16 i;
 static struct bnx2x_reg_set wc_regs[] = {
 0x0195},
 0x0007},
 0x0002},
 };
 /* Set XFI clock comp as default. */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_RX66_CONTROL, (3<<13));

 for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
 bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
 wc_regs[i].val);

 /* lane parameter is recomputed here; incoming value is unused */
 lane = bnx2x_get_warpcore_lane(phy, params);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);

}
4064 
/* Resolve which GPIO pin / port signals SFP module-absence (MOD_ABS)
 * interrupts.  On E3 the pin comes from the shmem e3_sfp_ctrl config;
 * on older chips it is fixed at GPIO 3 of the given port.
 * Returns 0 on success, -EINVAL for an out-of-range E3 pin config.
 * NOTE(review): the shmem mask/shift and the DP() call opening were
 * truncated in this extract of the file.
 */
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
 u32 chip_id,
 u32 shmem_base, u8 port,
 u8 *gpio_num, u8 *gpio_port)
{
 u32 cfg_pin;
 *gpio_num = 0;
 *gpio_port = 0;
 if (CHIP_IS_E3(bp)) {
 cfg_pin = (REG_RD(bp, shmem_base +
 offsetof(struct shmem_region,
 dev_info.port_hw_config[port].e3_sfp_ctrl)) &

 /* Should not happen. This function is called upon an interrupt
  * triggered by a GPIO (since EPIO can only generate interrupts
  * to the MCP).
  * So if this function was called and none of the GPIOs was set,
  * the configuration is badly broken.
  */
 if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
 (cfg_pin > PIN_CFG_GPIO3_P1)) {
 "ERROR: Invalid cfg pin %x for module detect indication\n",
 cfg_pin);
 return -EINVAL;
 }

 /* Pins are encoded as port*4 + gpio relative to GPIO0_P0 */
 *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
 *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
 } else {
 *gpio_num = MISC_REGISTERS_GPIO_3;
 *gpio_port = port;
 }
 DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
 return 0;
}
4103 
4104 static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
4105  struct link_params *params)
4106 {
4107  struct bnx2x *bp = params->bp;
4108  u8 gpio_num, gpio_port;
4109  u32 gpio_val;
4110  if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
4111  params->shmem_base, params->port,
4112  &gpio_num, &gpio_port) != 0)
4113  return 0;
4114  gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
4115 
4116  /* Call the handling function in case module is detected */
4117  if (gpio_val == 0)
4118  return 1;
4119  else
4120  return 0;
4121 }
4122 static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
4123  struct link_params *params)
4124 {
4125  u16 gp2_status_reg0, lane;
4126  struct bnx2x *bp = params->bp;
4127 
4128  lane = bnx2x_get_warpcore_lane(phy, params);
4129 
4130  bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
4131  &gp2_status_reg0);
4132 
4133  return (gp2_status_reg0 >> (8+lane)) & 0x1;
4134 }
4135 
/* Periodic Warpcore runtime maintenance.  Runs every other invocation
 * (the turn_to_run_wc_rt toggle) and only while a signal is detected.
 * While rx_tx_asic_rst retries remain: if neither 1G nor 10G-KR link
 * has come up, bounce the lane reset and restart autoneg, decrementing
 * the retry counter; otherwise clear the counter.
 * NOTE(review): the switch case label, DP() openings and the AN write
 * arguments were truncated in this extract of the file.
 */
static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
 struct link_params *params,
 struct link_vars *vars)
{
 struct bnx2x *bp = params->bp;
 u32 serdes_net_if;
 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
 u16 lane = bnx2x_get_warpcore_lane(phy, params);

 /* Toggle so the body below runs only every second call */
 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;

 if (!vars->turn_to_run_wc_rt)
 return;

 /* Return if there is no link partner */
 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
 return;
 }

 if (vars->rx_tx_asic_rst) {
 serdes_net_if = (REG_RD(bp, params->shmem_base +
 offsetof(struct shmem_region, dev_info.
 port_hw_config[params->port].default_cfg)) &

 switch (serdes_net_if) {
 /* Do we get link yet? */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
 &gp_status1);
 lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */
 /*10G KR*/
 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;

 "gp_status1 0x%x\n", gp_status1);

 if (lnkup_kr || lnkup) {
 vars->rx_tx_asic_rst = 0;
 "link up, rx_tx_asic_rst 0x%x\n",
 vars->rx_tx_asic_rst);
 } else {
 /* Reset the lane to see if link comes up.*/
 bnx2x_warpcore_reset_lane(bp, phy, 1);
 bnx2x_warpcore_reset_lane(bp, phy, 0);

 /* Restart Autoneg */
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,

 vars->rx_tx_asic_rst--;
 DP(NETIF_MSG_LINK, "0x%x retry left\n",
 vars->rx_tx_asic_rst);
 }
 break;

 default:
 break;
 }

 } /*params->rx_tx_asic_rst*/

}
4201 static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
4202  struct link_params *params)
4203 {
4204  u16 lane = bnx2x_get_warpcore_lane(phy, params);
4205  struct bnx2x *bp = params->bp;
4206  bnx2x_warpcore_clear_regs(phy, params, lane);
4207  if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
4208  SPEED_10000) &&
4209  (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
4210  DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
4211  bnx2x_warpcore_set_10G_XFI(phy, params, 0);
4212  } else {
4213  DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
4214  bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
4215  }
4216 }
4217 
/* Main Warpcore init: read the serdes net-interface type from shmem,
 * hold the lane in reset, then dispatch to the matching mode setup
 * (SGMII, KR autoneg/force, XFI/fiber, SFI with module detection,
 * 20G DXGXS, 20G KR2) and finally release the lane reset.
 * NOTE(review): the shmem mask line and the switch case labels were
 * truncated in this extract of the file.
 */
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 struct link_params *params,
 struct link_vars *vars)
{
 struct bnx2x *bp = params->bp;
 u32 serdes_net_if;
 u8 fiber_mode;
 u16 lane = bnx2x_get_warpcore_lane(phy, params);
 serdes_net_if = (REG_RD(bp, params->shmem_base +
 offsetof(struct shmem_region, dev_info.
 port_hw_config[params->port].default_cfg)) &
 DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
 "serdes_net_if = 0x%x\n",
 vars->line_speed, serdes_net_if);
 bnx2x_set_aer_mmd(params, phy);
 /* Keep the lane in reset while registers are programmed */
 bnx2x_warpcore_reset_lane(bp, phy, 1);
 vars->phy_flags |= PHY_XGXS_FLAG;
 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
 (phy->req_line_speed &&
 ((phy->req_line_speed == SPEED_100) ||
 (phy->req_line_speed == SPEED_10)))) {
 vars->phy_flags |= PHY_SGMII_FLAG;
 DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
 bnx2x_warpcore_clear_regs(phy, params, lane);
 bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
 } else {
 switch (serdes_net_if) {
 /* Enable KR Auto Neg */
 if (params->loopback_mode != LOOPBACK_EXT)
 bnx2x_warpcore_enable_AN_KR(phy, params, vars);
 else {
 DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
 bnx2x_warpcore_set_10G_KR(phy, params, vars);
 }
 break;

 bnx2x_warpcore_clear_regs(phy, params, lane);
 if (vars->line_speed == SPEED_10000) {
 DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
 bnx2x_warpcore_set_10G_XFI(phy, params, 1);
 } else {
 if (SINGLE_MEDIA_DIRECT(params)) {
 DP(NETIF_MSG_LINK, "1G Fiber\n");
 fiber_mode = 1;
 } else {
 DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
 fiber_mode = 0;
 }
 bnx2x_warpcore_set_sgmii_speed(phy,
 params,
 fiber_mode,
 0);
 }

 break;

 /* Issue Module detection */
 if (bnx2x_is_sfp_module_plugged(phy, params))
 bnx2x_sfp_module_detection(phy, params);

 bnx2x_warpcore_config_sfi(phy, params);
 break;

 if (vars->line_speed != SPEED_20000) {
 DP(NETIF_MSG_LINK, "Speed not supported yet\n");
 return;
 }
 DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
 bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
 /* Issue Module detection */

 bnx2x_sfp_module_detection(phy, params);
 break;

 if (vars->line_speed != SPEED_20000) {
 DP(NETIF_MSG_LINK, "Speed not supported yet\n");
 return;
 }
 DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
 bnx2x_warpcore_set_20G_KR2(bp, phy);
 break;

 default:
 "Unsupported Serdes Net Interface 0x%x\n",
 serdes_net_if);
 return;
 }
 }

 /* Take lane out of reset after configuration is finished */
 bnx2x_warpcore_reset_lane(bp, phy, 0);
 DP(NETIF_MSG_LINK, "Exit config init\n");
}
4318 
/* Enable (@tx_en != 0) or disable the SFP laser on an E3 board by
 * driving the DISABLE_TX_LASER config pin (and, per the comment below,
 * its 20G companion pin three positions later) with the inverted value.
 * NOTE(review): the cfg_pin mask and the guard around the second
 * bnx2x_set_cfg_pin() call were truncated in this extract of the file.
 */
static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
 struct bnx2x_phy *phy,
 u8 tx_en)
{
 struct bnx2x *bp = params->bp;
 u32 cfg_pin;
 u8 port = params->port;

 cfg_pin = REG_RD(bp, params->shmem_base +
 offsetof(struct shmem_region,
 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
 /* Set the !tx_en since this pin is DISABLE_TX_LASER */
 DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
 /* For 20G, the expected pin to be used is 3 pins after the current */

 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
}
4339 
/* Reset the Warpcore link: turn the SFP transmitter off, put the lane
 * in reset, clear any 10G/20G and 1G loopback settings, update the
 * 1-copy registers, disable CL36 PCS TX for this lane (both lanes in
 * dual mode), and restore the AER mapping.
 * NOTE(review): several register-name argument lines and the 1-copy
 * block-select writes were truncated in this extract of the file.
 */
static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 struct link_params *params)
{
 struct bnx2x *bp = params->bp;
 u16 val16, lane;
 bnx2x_sfp_e3_set_transmitter(params, phy, 0);
 bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
 bnx2x_set_aer_mmd(params, phy);
 /* Global register */
 bnx2x_warpcore_reset_lane(bp, phy, 1);

 /* Clear loopback settings (if any) */
 /* 10G & 20G */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 0xBFFF);

 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);

 /* Update those 1-copy registers */
 /* Enable 1G MDIO (1-copy) */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 &val16);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 val16 & ~0x10);

 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 val16 & 0xff00);

 lane = bnx2x_get_warpcore_lane(phy, params);
 /* Disable CL36 PCS Tx */
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 val16 |= (0x11 << lane);
 /* Dual-lane mode also disables the paired lane */
 if (phy->flags & FLAGS_WC_DUAL_MODE)
 val16 |= (0x22 << lane);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,

 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 val16 &= ~(0x0303 << (lane << 1));
 val16 |= (0x0101 << (lane << 1));
 if (phy->flags & FLAGS_WC_DUAL_MODE) {
 val16 &= ~(0x0c0c << (lane << 1));
 val16 |= (0x0404 << (lane << 1));
 }

 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 /* Restore AER */
 bnx2x_set_aer_mmd(params, phy);

}
4406 
/* Enable Warpcore loopback.  Below 10G, enable 1G MDIO via the 1-copy
 * registers and set the per-lane 1G loopback bit, then restore the
 * 4-copy AER mapping; at 10G/20G, set the loopback bits directly.
 * NOTE(review): the 1-copy block-select writes and several register
 * name arguments were truncated in this extract of the file.
 */
static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
 struct link_params *params)
{
 struct bnx2x *bp = params->bp;
 u16 val16;
 u32 lane;
 DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
 params->loopback_mode, phy->req_line_speed);

 if (phy->req_line_speed < SPEED_10000) {
 /* 10/100/1000 */

 /* Update those 1-copy registers */
 /* Enable 1G MDIO (1-copy) */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 0x10);
 /* Set 1G loopback based on lane (1-copy) */
 lane = bnx2x_get_warpcore_lane(phy, params);
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 val16 | (1<<lane));

 /* Switch back to 4-copy registers */
 bnx2x_set_aer_mmd(params, phy);
 } else {
 /* 10G & 20G */
 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 0x4000);

 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 }
}
4446 
4447 
4448 
/* Derive link_vars (speed, duplex, flow control, phy flags, MAC type)
 * from the shared-memory link_status word previously read into
 * vars->link_status. Called with link_status already synced from the MCP.
 */
4449 static void bnx2x_sync_link(struct link_params *params,
4450  struct link_vars *vars)
4451 {
4452  struct bnx2x *bp = params->bp;
4453  u8 link_10g_plus;
4456  vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
4457  if (vars->link_up) {
4458  DP(NETIF_MSG_LINK, "phy link up\n");
4459 
4460  vars->phy_link_up = 1;
4461  vars->duplex = DUPLEX_FULL;
/* Decode the speed/duplex field of link_status; half-duplex cases
 * fall through into the matching full-duplex case to set line_speed.
 * NOTE(review): the mask expression line is missing from this listing.
 */
4462  switch (vars->link_status &
4464  case LINK_10THD:
4465  vars->duplex = DUPLEX_HALF;
4466  /* Fall thru */
4467  case LINK_10TFD:
4468  vars->line_speed = SPEED_10;
4469  break;
4470 
4471  case LINK_100TXHD:
4472  vars->duplex = DUPLEX_HALF;
4473  /* Fall thru */
4474  case LINK_100T4:
4475  case LINK_100TXFD:
4476  vars->line_speed = SPEED_100;
4477  break;
4478 
4479  case LINK_1000THD:
4480  vars->duplex = DUPLEX_HALF;
4481  /* Fall thru */
4482  case LINK_1000TFD:
4483  vars->line_speed = SPEED_1000;
4484  break;
4485 
4486  case LINK_2500THD:
4487  vars->duplex = DUPLEX_HALF;
4488  /* Fall thru */
4489  case LINK_2500TFD:
4490  vars->line_speed = SPEED_2500;
4491  break;
4492 
4493  case LINK_10GTFD:
4494  vars->line_speed = SPEED_10000;
4495  break;
4496  case LINK_20GTFD:
4497  vars->line_speed = SPEED_20000;
4498  break;
4499  default:
4500  break;
4501  }
/* Build flow_ctrl from the TX/RX flow-control bits of link_status.
 * NOTE(review): the condition lines guarding these |= are missing here.
 */
4502  vars->flow_ctrl = 0;
4504  vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
4505 
4507  vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
4508 
4509  if (!vars->flow_ctrl)
4511 
/* 10/100 Mbps always run through the SGMII path */
4512  if (vars->line_speed &&
4513  ((vars->line_speed == SPEED_10) ||
4514  (vars->line_speed == SPEED_100))) {
4515  vars->phy_flags |= PHY_SGMII_FLAG;
4516  } else {
4517  vars->phy_flags &= ~PHY_SGMII_FLAG;
4518  }
/* On Warpcore-based devices 1G also uses the SGMII path */
4519  if (vars->line_speed &&
4520  USES_WARPCORE(bp) &&
4521  (vars->line_speed == SPEED_1000))
4522  vars->phy_flags |= PHY_SGMII_FLAG;
4523  /* Anything 10 and over uses the bmac */
4524  link_10g_plus = (vars->line_speed >= SPEED_10000);
4525 
/* Select active MAC block: XMAC/UMAC on Warpcore, BMAC/EMAC otherwise */
4526  if (link_10g_plus) {
4527  if (USES_WARPCORE(bp))
4528  vars->mac_type = MAC_TYPE_XMAC;
4529  else
4530  vars->mac_type = MAC_TYPE_BMAC;
4531  } else {
4532  if (USES_WARPCORE(bp))
4533  vars->mac_type = MAC_TYPE_UMAC;
4534  else
4535  vars->mac_type = MAC_TYPE_EMAC;
4536  }
4537  } else { /* Link down */
4538  DP(NETIF_MSG_LINK, "phy link down\n");
4539 
4540  vars->phy_link_up = 0;
4541 
4542  vars->line_speed = 0;
4543  vars->duplex = DUPLEX_FULL;
4545 
4546  /* Indicate no mac active */
4547  vars->mac_type = MAC_TYPE_NONE;
4552  }
4553 }
4554 
4556  struct link_vars *vars)
4557 {
/* Refresh link state from shared memory written by management FW:
 * link_status, optional EEE status, media types, AEU interrupt mask
 * and PFC feature flag; then derive link_vars via bnx2x_sync_link().
 * NOTE(review): the function's first signature line is missing from
 * this listing — presumably bnx2x_link_status_update(); confirm.
 */
4558  struct bnx2x *bp = params->bp;
4559  u8 port = params->port;
4560  u32 sync_offset, media_types;
4561  /* Update PHY configuration */
4562  set_phy_vars(params, vars);
4563 
4564  vars->link_status = REG_RD(bp, params->shmem_base +
4565  offsetof(struct shmem_region,
4566  port_mb[port].link_status));
/* EEE status lives in shmem2 and exists only when the FW supports it */
4567  if (bnx2x_eee_has_cap(params))
4568  vars->eee_status = REG_RD(bp, params->shmem2_base +
4569  offsetof(struct shmem2_region,
4570  eee_status[params->port]));
4571 
4572  vars->phy_flags = PHY_XGXS_FLAG;
4573  bnx2x_sync_link(params, vars);
4574  /* Sync media type */
4575  sync_offset = params->shmem_base +
4576  offsetof(struct shmem_region,
4577  dev_info.port_hw_config[port].media_type);
4578  media_types = REG_RD(bp, sync_offset);
4579 
/* One byte of media_types per PHY; shift amounts missing from listing */
4580  params->phy[INT_PHY].media_type =
4581  (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
4583  params->phy[EXT_PHY1].media_type =
4584  (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
4586  params->phy[EXT_PHY2].media_type =
4587  (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
4589  DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
4590 
4591  /* Sync AEU offset */
4592  sync_offset = params->shmem_base +
4593  offsetof(struct shmem_region,
4594  dev_info.port_hw_config[port].aeu_int_mask);
4595 
4596  vars->aeu_int_mask = REG_RD(bp, sync_offset);
4597 
4598  /* Sync PFC status */
4600  params->feature_config_flags |=
4602  else
4603  params->feature_config_flags &=
4605 
4606  DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
4607  vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
4608  DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
4609  vars->line_speed, vars->duplex, vars->flow_ctrl);
4610 }
4611 
/* Program the XGXS master lane for auto-negotiation, derived from the
 * serdes lane-swap field of params->lane_config (read-modify-write).
 * NOTE(review): mask/shift lines and register-bank arguments are
 * missing from this listing.
 */
4612 static void bnx2x_set_master_ln(struct link_params *params,
4613  struct bnx2x_phy *phy)
4614 {
4615  struct bnx2x *bp = params->bp;
4616  u16 new_master_ln, ser_lane;
4617  ser_lane = ((params->lane_config &
4620 
4621  /* Set the master_ln for AN */
4622  CL22_RD_OVER_CL45(bp, phy,
4625  &new_master_ln);
4626 
4627  CL22_WR_OVER_CL45(bp, phy,
4630  (new_master_ln | ser_lane));
4631 }
4632 
/* Soft-reset the XGXS/SerDes unicore via the MII control register and
 * poll until the self-clearing reset bit drops.
 * Returns 0 on success, -EINVAL if the reset bit never clears within
 * MDIO_ACCESS_TIMEOUT iterations (~5us apart).
 * @set_serdes: when non-zero, also reconfigure SerDes register access
 *              for this port after asserting the reset.
 */
4633 static int bnx2x_reset_unicore(struct link_params *params,
4634  struct bnx2x_phy *phy,
4635  u8 set_serdes)
4636 {
4637  struct bnx2x *bp = params->bp;
4638  u16 mii_control;
4639  u16 i;
4640  CL22_RD_OVER_CL45(bp, phy,
4642  MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
4643 
4644  /* Reset the unicore */
4645  CL22_WR_OVER_CL45(bp, phy,
4648  (mii_control |
4650  if (set_serdes)
4651  bnx2x_set_serdes_access(bp, params->port);
4652 
4653  /* Wait for the reset to self clear */
4654  for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
4655  udelay(5);
4656 
4657  /* The reset erased the previous bank value */
4658  CL22_RD_OVER_CL45(bp, phy,
4661  &mii_control);
4662 
4663  if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
4664  udelay(5);
4665  return 0;
4666  }
4667  }
4668 
/* Reset never self-cleared — warn loudly; link bring-up will not work */
4669  netdev_err(bp->dev, "Warning: PHY was not initialized,"
4670  " Port %d\n",
4671  params->port);
4672  DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
4673  return -EINVAL;
4674 
4675 }
4676 
/* Apply RX/TX lane-swap configuration from params->lane_config to the
 * XGXS block. A value of 0x1b means the identity mapping (lanes 0123),
 * for which the swap enable is cleared instead.
 * NOTE(review): shift/enable-bit lines are missing from this listing.
 */
4677 static void bnx2x_set_swap_lanes(struct link_params *params,
4678  struct bnx2x_phy *phy)
4679 {
4680  struct bnx2x *bp = params->bp;
4681  /* Each two bits represents a lane number:
4682  * No swap is 0123 => 0x1b no need to enable the swap
4683  */
4684  u16 rx_lane_swap, tx_lane_swap;
4685 
4686  rx_lane_swap = ((params->lane_config &
4689  tx_lane_swap = ((params->lane_config &
4692 
4693  if (rx_lane_swap != 0x1b) {
4694  CL22_WR_OVER_CL45(bp, phy,
4697  (rx_lane_swap |
4700  } else {
4701  CL22_WR_OVER_CL45(bp, phy,
4704  }
4705 
4706  if (tx_lane_swap != 0x1b) {
4707  CL22_WR_OVER_CL45(bp, phy,
4710  (tx_lane_swap |
4712  } else {
4713  CL22_WR_OVER_CL45(bp, phy,
4716  }
4717 }
4718 
/* Enable/disable parallel detection in the SerDes control2 register
 * based on phy->speed_cap_mask; when 10G capability is advertised,
 * also configure the XGXS 10G parallel-detect registers and disable
 * parallel detection of HiG.
 * NOTE(review): condition and register-name lines are missing from
 * this listing.
 */
4719 static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
4720  struct link_params *params)
4721 {
4722  struct bnx2x *bp = params->bp;
4723  u16 control2;
4724  CL22_RD_OVER_CL45(bp, phy,
4727  &control2);
4730  else
4732  DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
4733  phy->speed_cap_mask, control2);
4734  CL22_WR_OVER_CL45(bp, phy,
4737  control2);
4738 
4740  (phy->speed_cap_mask &
4742  DP(NETIF_MSG_LINK, "XGXS\n");
4743 
4744  CL22_WR_OVER_CL45(bp, phy,
4748 
4749  CL22_RD_OVER_CL45(bp, phy,
4752  &control2);
4753 
4754 
4755  control2 |=
4757 
4758  CL22_WR_OVER_CL45(bp, phy,
4761  control2);
4762 
4763  /* Disable parallel detection of HiG */
4764  CL22_WR_OVER_CL45(bp, phy,
4769  }
4770 }
4771 
/* Configure CL37/CL73 auto-negotiation for the XGXS:
 * - enable/disable CL37 AN and autodetection depending on whether the
 *   requested line speed is SPEED_AUTO_NEG,
 * - enable/disable TetonII and BAM aneg modes accordingly,
 * - when @enable_cl73 is set, also enable the CL73 FSM, BAM station
 *   manager and advertise CL73 link speeds from speed_cap_mask.
 * NOTE(review): several register/constant lines are missing from this
 * listing.
 */
4772 static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
4773  struct link_params *params,
4774  struct link_vars *vars,
4775  u8 enable_cl73)
4776 {
4777  struct bnx2x *bp = params->bp;
4778  u16 reg_val;
4779 
4780  /* CL37 Autoneg */
4781  CL22_RD_OVER_CL45(bp, phy,
4783  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
4784 
4785  /* CL37 Autoneg Enabled */
4786  if (vars->line_speed == SPEED_AUTO_NEG)
4788  else /* CL37 Autoneg Disabled */
4789  reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
4791 
4792  CL22_WR_OVER_CL45(bp, phy,
4794  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
4795 
4796  /* Enable/Disable Autodetection */
4797 
4798  CL22_RD_OVER_CL45(bp, phy,
4804  if (vars->line_speed == SPEED_AUTO_NEG)
4806  else
4808 
4809  CL22_WR_OVER_CL45(bp, phy,
4812 
4813  /* Enable TetonII and BAM autoneg */
4814  CL22_RD_OVER_CL45(bp, phy,
4817  &reg_val);
4818  if (vars->line_speed == SPEED_AUTO_NEG) {
4819  /* Enable BAM aneg Mode and TetonII aneg Mode */
4822  } else {
4823  /* TetonII and BAM Autoneg Disabled */
4826  }
4827  CL22_WR_OVER_CL45(bp, phy,
4830  reg_val);
4831 
4832  if (enable_cl73) {
4833  /* Enable Cl73 FSM status bits */
4834  CL22_WR_OVER_CL45(bp, phy,
4837  0xe);
4838 
4839  /* Enable BAM Station Manager*/
4840  CL22_WR_OVER_CL45(bp, phy,
4846 
4847  /* Advertise CL73 link speeds */
4848  CL22_RD_OVER_CL45(bp, phy,
4851  &reg_val);
4852  if (phy->speed_cap_mask &
4855  if (phy->speed_cap_mask &
4858 
4859  CL22_WR_OVER_CL45(bp, phy,
4862  reg_val);
4863 
4864  /* CL73 Autoneg Enabled */
4866 
4867  } else /* CL73 Autoneg Disabled */
4868  reg_val = 0;
4869 
4870  CL22_WR_OVER_CL45(bp, phy,
4873 }
4874 
4875 /* Program SerDes, forced speed */
/* Force a specific speed/duplex on the SerDes: disables autoneg and
 * SGMII in MII control, sets full duplex if requested, then programs
 * the MISC1 speed field (only relevant above 1G, e.g. 2.5G or 10G).
 * NOTE(review): bank-name and speed-constant lines are missing from
 * this listing.
 */
4876 static void bnx2x_program_serdes(struct bnx2x_phy *phy,
4877  struct link_params *params,
4878  struct link_vars *vars)
4879 {
4880  struct bnx2x *bp = params->bp;
4881  u16 reg_val;
4882 
4883  /* Program duplex, disable autoneg and sgmii*/
4884  CL22_RD_OVER_CL45(bp, phy,
4886  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
4890  if (phy->req_duplex == DUPLEX_FULL)
4892  CL22_WR_OVER_CL45(bp, phy,
4894  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
4895 
4896  /* Program speed
4897  * - needed only if the speed is greater than 1G (2.5G or 10G)
4898  */
4899  CL22_RD_OVER_CL45(bp, phy,
4901  MDIO_SERDES_DIGITAL_MISC1, &reg_val);
4902  /* Clearing the speed value before setting the right speed */
4903  DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
4904 
4907 
4908  if (!((vars->line_speed == SPEED_1000) ||
4909  (vars->line_speed == SPEED_100) ||
4910  (vars->line_speed == SPEED_10))) {
4911 
4914  if (vars->line_speed == SPEED_10000)
4915  reg_val |=
4917  }
4918 
4919  CL22_WR_OVER_CL45(bp, phy,
4921  MDIO_SERDES_DIGITAL_MISC1, reg_val);
4922 
4923 }
4924 
/* Advertise Broadcom CL37 extended capabilities (2.5G and/or 10G from
 * speed_cap_mask) in the OVER_1G UP1 register, and write UP3 = 0x400.
 * NOTE(review): the speed_cap_mask condition lines are missing from
 * this listing.
 */
4925 static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
4926  struct link_params *params)
4927 {
4928  struct bnx2x *bp = params->bp;
4929  u16 val = 0;
4930 
4931  /* Set extended capabilities */
4933  val |= MDIO_OVER_1G_UP1_2_5G;
4935  val |= MDIO_OVER_1G_UP1_10G;
4936  CL22_WR_OVER_CL45(bp, phy,
4938  MDIO_OVER_1G_UP1, val);
4939 
4940  CL22_WR_OVER_CL45(bp, phy,
4942  MDIO_OVER_1G_UP3, 0x400);
4943 }
4944 
/* Publish IEEE auto-negotiation advertisement: writes the CL37 combo
 * AN advertisement, then folds the pause bits of @ieee_fc into the
 * CL73 IEEEB1 AN_ADV1 register (read-modify-write of the pause mask).
 */
4945 static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
4946  struct link_params *params,
4947  u16 ieee_fc)
4948 {
4949  struct bnx2x *bp = params->bp;
4950  u16 val;
4951  /* For AN, we are always publishing full duplex */
4952 
4953  CL22_WR_OVER_CL45(bp, phy,
4956  CL22_RD_OVER_CL45(bp, phy,
4958  MDIO_CL73_IEEEB1_AN_ADV1, &val);
4960  val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
4961  CL22_WR_OVER_CL45(bp, phy,
4964 }
4965 
/* Restart auto-negotiation by setting the enable+restart bits in the
 * appropriate MII control register: the CL73 IEEEB0 register when
 * @enable_cl73 is set, otherwise the CL37 combo register (BAM/CL37).
 */
4966 static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
4967  struct link_params *params,
4968  u8 enable_cl73)
4969 {
4970  struct bnx2x *bp = params->bp;
4971  u16 mii_control;
4972 
4973  DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
4974  /* Enable and restart BAM/CL37 aneg */
4975 
4976  if (enable_cl73) {
4977  CL22_RD_OVER_CL45(bp, phy,
4980  &mii_control);
4981 
4982  CL22_WR_OVER_CL45(bp, phy,
4985  (mii_control |
4988  } else {
4989 
4990  CL22_RD_OVER_CL45(bp, phy,
4993  &mii_control);
4995  "bnx2x_restart_autoneg mii_control before = 0x%x\n",
4996  mii_control);
4997  CL22_WR_OVER_CL45(bp, phy,
5000  (mii_control |
5003  }
5004 }
5005 
/* Initialize the unicore for SGMII operation (unicore is always the
 * SGMII slave). For a forced line speed, autoneg is disabled and the
 * MII speed/duplex bits are programmed directly; for SPEED_AUTO_NEG
 * the AN process is (re)started via bnx2x_restart_autoneg().
 * NOTE(review): several register/constant lines are missing from this
 * listing.
 */
5006 static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5007  struct link_params *params,
5008  struct link_vars *vars)
5009 {
5010  struct bnx2x *bp = params->bp;
5011  u16 control1;
5012 
5013  /* In SGMII mode, the unicore is always slave */
5014 
5015  CL22_RD_OVER_CL45(bp, phy,
5018  &control1);
5020  /* Set sgmii mode (and not fiber) */
5024  CL22_WR_OVER_CL45(bp, phy,
5027  control1);
5028 
5029  /* If forced speed */
5030  if (!(vars->line_speed == SPEED_AUTO_NEG)) {
5031  /* Set speed, disable autoneg */
5032  u16 mii_control;
5033 
5034  CL22_RD_OVER_CL45(bp, phy,
5037  &mii_control);
5038  mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
5041 
5042  switch (vars->line_speed) {
5043  case SPEED_100:
5044  mii_control |=
5046  break;
5047  case SPEED_1000:
5048  mii_control |=
5050  break;
5051  case SPEED_10:
5052  /* There is nothing to set for 10M */
5053  break;
5054  default:
5055  /* Invalid speed for SGMII */
5056  DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
5057  vars->line_speed);
5058  break;
5059  }
5060 
5061  /* Setting the full duplex */
5062  if (phy->req_duplex == DUPLEX_FULL)
5063  mii_control |=
5065  CL22_WR_OVER_CL45(bp, phy,
5068  mii_control);
5069 
5070  } else { /* AN mode */
5071  /* Enable and restart AN */
5072  bnx2x_restart_autoneg(phy, params, 0);
5073  }
5074 }
5075 
5076 /* Link management
5077  */
/* Report whether the link came up through parallel detection rather
 * than auto-negotiation. Returns 1 when either 1G (STATUS2, read
 * twice because the bit is latched) or 10G parallel-detect status
 * indicates a link, 0 otherwise. Only meaningful when the requested
 * speed is SPEED_AUTO_NEG.
 */
5078 static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
5079  struct link_params *params)
5080 {
5081  struct bnx2x *bp = params->bp;
5082  u16 pd_10g, status2_1000x;
5083  if (phy->req_line_speed != SPEED_AUTO_NEG)
5084  return 0;
5085  CL22_RD_OVER_CL45(bp, phy,
5088  &status2_1000x);
5089  CL22_RD_OVER_CL45(bp, phy,
5092  &status2_1000x);
5094  DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
5095  params->port);
5096  return 1;
5097  }
5098 
5099  CL22_RD_OVER_CL45(bp, phy,
5102  &pd_10g);
5103 
5105  DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
5106  params->port);
5107  return 1;
5108  }
5109  return 0;
5110 }
5111 
/* Resolve advertised flow control from local-driver (LD) and
 * link-partner (LP) pause advertisements. Reads the CL73 registers
 * when gp_status indicates CL73 AN completed, CL37 registers
 * otherwise, then combines both sides via bnx2x_pause_resolve().
 * NOTE(review): the gp_status condition and register-name lines are
 * missing from this listing.
 */
5112 static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
5113  struct link_params *params,
5114  struct link_vars *vars,
5115  u32 gp_status)
5116 {
5117  u16 ld_pause; /* local driver */
5118  u16 lp_pause; /* link partner */
5119  u16 pause_result;
5120  struct bnx2x *bp = params->bp;
5121  if ((gp_status &
5126 
5127  CL22_RD_OVER_CL45(bp, phy,
5130  &ld_pause);
5131  CL22_RD_OVER_CL45(bp, phy,
5134  &lp_pause);
5135  pause_result = (ld_pause &
5137  pause_result |= (lp_pause &
5139  DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result);
5140  } else {
5141  CL22_RD_OVER_CL45(bp, phy,
5144  &ld_pause);
5145  CL22_RD_OVER_CL45(bp, phy,
5148  &lp_pause);
5149  pause_result = (ld_pause &
5151  pause_result |= (lp_pause &
5153  DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
5154  }
5155  bnx2x_pause_resolve(vars, pause_result);
5156 
5157 }
5158 
/* Decide vars->flow_ctrl:
 * - a fixed req_flow_ctrl always wins (advertisement still updated
 *   when AN was requested),
 * - forced speed uses params->req_fc_auto_adv,
 * - otherwise, on AN complete and non-SGMII, resolve from the
 *   advertised pause bits — unless parallel detection was used, in
 *   which case req_fc_auto_adv applies.
 */
5159 static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
5160  struct link_params *params,
5161  struct link_vars *vars,
5162  u32 gp_status)
5163 {
5164  struct bnx2x *bp = params->bp;
5166 
5167  /* Resolve from gp_status in case of AN complete and not sgmii */
5168  if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
5169  /* Update the advertised flow-controled of LD/LP in AN */
5170  if (phy->req_line_speed == SPEED_AUTO_NEG)
5171  bnx2x_update_adv_fc(phy, params, vars, gp_status);
5172  /* But set the flow-control result as the requested one */
5173  vars->flow_ctrl = phy->req_flow_ctrl;
5174  } else if (phy->req_line_speed != SPEED_AUTO_NEG)
5175  vars->flow_ctrl = params->req_fc_auto_adv;
5176  else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
5177  (!(vars->phy_flags & PHY_SGMII_FLAG))) {
5178  if (bnx2x_direct_parallel_detect_used(phy, params)) {
5179  vars->flow_ctrl = params->req_fc_auto_adv;
5180  return;
5181  }
5182  bnx2x_update_adv_fc(phy, params, vars, gp_status);
5183  }
5184  DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
5185 }
5186 
/* Detect a link partner that supports only CL37 BAM (not CL73) and,
 * when detected, disable CL73 and restart CL37 auto-negotiation.
 * Three-step check: signal detected -> CL73 FSM stable -> CL37 message
 * pages received. Each failing step returns early (restoring CL73 in
 * the no-signal case).
 */
5187 static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5188  struct link_params *params)
5189 {
5190  struct bnx2x *bp = params->bp;
5191  u16 rx_status, ustat_val, cl37_fsm_received;
5192  DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
5193  /* Step 1: Make sure signal is detected */
5194  CL22_RD_OVER_CL45(bp, phy,
5197  &rx_status);
5198  if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
5199  (MDIO_RX0_RX_STATUS_SIGDET)) {
5200  DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
5201  "rx_status(0x80b0) = 0x%x\n", rx_status);
5202  CL22_WR_OVER_CL45(bp, phy,
5206  return;
5207  }
5208  /* Step 2: Check CL73 state machine */
5209  CL22_RD_OVER_CL45(bp, phy,
5212  &ustat_val);
5213  if ((ustat_val &
5218  DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
5219  "ustat_val(0x8371) = 0x%x\n", ustat_val);
5220  return;
5221  }
5222  /* Step 3: Check CL37 Message Pages received to indicate LP
5223  * supports only CL37
5224  */
5225  CL22_RD_OVER_CL45(bp, phy,
5228  &cl37_fsm_received);
5229  if ((cl37_fsm_received &
5234  DP(NETIF_MSG_LINK, "No CL37 FSM were received. "
5235  "misc_rx_status(0x8330) = 0x%x\n",
5236  cl37_fsm_received);
5237  return;
5238  }
5239  /* The combined cl37/cl73 fsm state information indicating that
5240  * we are connected to a device which does not support cl73, but
5241  * does support cl37 BAM. In this case we disable cl73 and
5242  * restart cl37 auto-neg
5243  */
5244 
5245  /* Disable CL73 */
5246  CL22_WR_OVER_CL45(bp, phy,
5249  0);
5250  /* Restart CL37 autoneg */
5251  bnx2x_restart_autoneg(phy, params, 0);
5252  DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
5253 }
5254 
/* Record how the link was established: set the AN-complete bit in
 * link_status when gp_status says CL73/CL37 finished, and the
 * parallel-detect bit when parallel detection was used.
 * NOTE(review): the link_status flag constant lines are missing from
 * this listing.
 */
5255 static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
5256  struct link_params *params,
5257  struct link_vars *vars,
5258  u32 gp_status)
5259 {
5260  if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
5261  vars->link_status |=
5263 
5264  if (bnx2x_direct_parallel_detect_used(phy, params))
5265  vars->link_status |=
5267 }
/* Translate a GP-status speed mask into vars->line_speed and the
 * matching link_status speed/duplex bit.
 * Returns 0 on success, -EINVAL for unsupported speeds (5G/6G or an
 * unknown mask). On link down, resets phy_link_up / duplex / mac_type.
 * @is_link_up: non-zero when GP status reports link up
 * @speed_mask: GP_STATUS_* speed field
 * @is_duplex:  DUPLEX_FULL or DUPLEX_HALF (relevant up to 2.5G)
 */
5268 static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
5269  struct link_params *params,
5270  struct link_vars *vars,
5271  u16 is_link_up,
5272  u16 speed_mask,
5273  u16 is_duplex)
5274 {
5275  struct bnx2x *bp = params->bp;
5276  if (phy->req_line_speed == SPEED_AUTO_NEG)
5278  if (is_link_up) {
5279  DP(NETIF_MSG_LINK, "phy link up\n");
5280 
5281  vars->phy_link_up = 1;
5283 
5284  switch (speed_mask) {
5285  case GP_STATUS_10M:
5286  vars->line_speed = SPEED_10;
5287  if (is_duplex == DUPLEX_FULL)
5288  vars->link_status |= LINK_10TFD;
5289  else
5290  vars->link_status |= LINK_10THD;
5291  break;
5292 
5293  case GP_STATUS_100M:
5294  vars->line_speed = SPEED_100;
5295  if (is_duplex == DUPLEX_FULL)
5296  vars->link_status |= LINK_100TXFD;
5297  else
5298  vars->link_status |= LINK_100TXHD;
5299  break;
5300 
5301  case GP_STATUS_1G:
5302  case GP_STATUS_1G_KX:
5303  vars->line_speed = SPEED_1000;
5304  if (is_duplex == DUPLEX_FULL)
5305  vars->link_status |= LINK_1000TFD;
5306  else
5307  vars->link_status |= LINK_1000THD;
5308  break;
5309 
5310  case GP_STATUS_2_5G:
5311  vars->line_speed = SPEED_2500;
5312  if (is_duplex == DUPLEX_FULL)
5313  vars->link_status |= LINK_2500TFD;
5314  else
5315  vars->link_status |= LINK_2500THD;
5316  break;
5317 
5318  case GP_STATUS_5G:
5319  case GP_STATUS_6G:
5321  "link speed unsupported gp_status 0x%x\n",
5322  speed_mask);
5323  return -EINVAL;
5324 
5325  case GP_STATUS_10G_KX4:
5326  case GP_STATUS_10G_HIG:
5327  case GP_STATUS_10G_CX4:
5328  case GP_STATUS_10G_KR:
5329  case GP_STATUS_10G_SFI:
5330  case GP_STATUS_10G_XFI:
5331  vars->line_speed = SPEED_10000;
5332  vars->link_status |= LINK_10GTFD;
5333  break;
5334  case GP_STATUS_20G_DXGXS:
5335  vars->line_speed = SPEED_20000;
5336  vars->link_status |= LINK_20GTFD;
5337  break;
5338  default:
5340  "link speed unsupported gp_status 0x%x\n",
5341  speed_mask);
5342  return -EINVAL;
5343  }
5344  } else { /* link_down */
5345  DP(NETIF_MSG_LINK, "phy link down\n");
5346 
5347  vars->phy_link_up = 0;
5348 
5349  vars->duplex = DUPLEX_FULL;
5351  vars->mac_type = MAC_TYPE_NONE;
5352  }
5353  DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
5354  vars->phy_link_up, vars->line_speed);
5355  return 0;
5356 }
5357 
5358 static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
5359  struct link_params *params,
5360  struct link_vars *vars)
5361 {
5362  struct bnx2x *bp = params->bp;
5363 
5364  u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
5365  int rc = 0;
5366 
5367  /* Read gp_status */
5368  CL22_RD_OVER_CL45(bp, phy,
5371  &gp_status);
5373  duplex = DUPLEX_FULL;
5375  link_up = 1;
5376  speed_mask = gp_status & GP_STATUS_SPEED_MASK;
5377  DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
5378  gp_status, link_up, speed_mask);
5379  rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
5380  duplex);
5381  if (rc == -EINVAL)
5382  return rc;
5383 
5384  if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
5385  if (SINGLE_MEDIA_DIRECT(params)) {
5386  vars->duplex = duplex;
5387  bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
5388  if (phy->req_line_speed == SPEED_AUTO_NEG)
5389  bnx2x_xgxs_an_resolve(phy, params, vars,
5390  gp_status);
5391  }
5392  } else { /* Link_down */
5393  if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
5394  SINGLE_MEDIA_DIRECT(params)) {
5395  /* Check signal is detected */
5396  bnx2x_check_fallback_to_cl37(phy, params);
5397  }
5398  }
5399 
5400  /* Read LP advertised speeds*/
5401  if (SINGLE_MEDIA_DIRECT(params) &&
5403  u16 val;
5404 
5407 
5409  vars->link_status |=
5413  vars->link_status |=
5415 
5417  MDIO_OVER_1G_LP_UP1, &val);
5418 
5419  if (val & MDIO_OVER_1G_UP1_2_5G)
5420  vars->link_status |=
5423  vars->link_status |=
5425  }