ixgbe_dcb_82598.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <[email protected]>
  e1000-devel Mailing List <[email protected]>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"

/**
 * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Rx Data Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
                                        u16 *refill,
                                        u16 *max,
                                        u8 *prio_type)
{
        u32 reg = 0;
        u32 credit_refill = 0;
        u32 credit_max = 0;
        u8 i = 0;

        reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
        IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);

        reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
        /* Enable Arbiter */
        reg &= ~IXGBE_RMCS_ARBDIS;
        /* Enable Receive Recycle within the BWG */
        reg |= IXGBE_RMCS_RRM;
        /* Enable Deficit Fixed Priority arbitration */
        reg |= IXGBE_RMCS_DFP;

        IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
                credit_max = max[i];

                reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);

                if (prio_type[i] == prio_link)
                        reg |= IXGBE_RT2CR_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
        }

        reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
        reg |= IXGBE_RDRXCTL_RDMTS_1_2;
        reg |= IXGBE_RDRXCTL_MPBEN;
        reg |= IXGBE_RDRXCTL_MCEN;
        IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);

        reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        /* Make sure there are enough descriptors before arbitration */
        reg &= ~IXGBE_RXCTRL_DMBYPS;
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);

        return 0;
}
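
As an aside from the file itself: the per-TC word written to IXGBE_RT2CR(i) above packs the refill credits into the low bits, the maximum credit limit above IXGBE_RT2CR_MCL_SHIFT, and a link-strict-priority flag. The standalone sketch below mirrors that packing with assumed field positions (the shift of 12 and the bit-31 flag are illustrative stand-ins, not values taken from ixgbe_type.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions only; the real values live in ixgbe_type.h. */
#define DEMO_RT2CR_MCL_SHIFT 12          /* assumed shift of the max-credit field */
#define DEMO_RT2CR_LSP       0x80000000u /* assumed link-strict-priority flag */

/* Pack one traffic class's Rx arbiter credit word the way the loop above does. */
static uint32_t pack_rx_credit(uint16_t refill, uint16_t max, int link_strict)
{
        uint32_t reg = refill | ((uint32_t)max << DEMO_RT2CR_MCL_SHIFT);

        if (link_strict)
                reg |= DEMO_RT2CR_LSP;
        return reg;
}

int main(void)
{
        /* e.g. 64 refill credits, 511 max credits, not link strict */
        printf("RT2CR word: 0x%08x\n", (unsigned)pack_rx_credit(64, 511, 0));
        return 0;
}
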

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
                                                u16 *refill,
                                                u16 *max,
                                                u8 *bwg_id,
                                                u8 *prio_type)
{
        u32 reg, max_credits;
        u8 i;

        reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);

        /* Enable arbiter */
        reg &= ~IXGBE_DPMCS_ARBDIS;
        /* Enable DFP and Recycle mode */
        reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
        reg |= IXGBE_DPMCS_TSOEF;
        /* Configure Max TSO packet size 34KB including payload and headers */
        reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);

        IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
                reg |= refill[i];
                reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;

                if (prio_type[i] == prio_group)
                        reg |= IXGBE_TDTQ2TCCR_GSP;

                if (prio_type[i] == prio_link)
                        reg |= IXGBE_TDTQ2TCCR_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
        }

        return 0;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Data Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
                                                u16 *refill,
                                                u16 *max,
                                                u8 *bwg_id,
                                                u8 *prio_type)
{
        u32 reg;
        u8 i;

        reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
        /* Enable Data Plane Arbiter */
        reg &= ~IXGBE_PDPMCS_ARBDIS;
        /* Enable DFP and Transmit Recycle Mode */
        reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);

        IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;

                if (prio_type[i] == prio_group)
                        reg |= IXGBE_TDPT2TCCR_GSP;

                if (prio_type[i] == prio_link)
                        reg |= IXGBE_TDPT2TCCR_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
        }

        /* Enable Tx packet buffer division */
        reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
        reg |= IXGBE_DTXCTL_ENDBUBD;
        IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);

        return 0;
}

/**
 * ixgbe_dcb_config_pfc_82598 - Config priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: bitmap of enabled PFC traffic classes
 *
 * Configure Priority Flow Control for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
        u32 fcrtl, reg;
        u8 i;

        /* Enable Transmit Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
        reg &= ~IXGBE_RMCS_TFCE_802_3X;
        reg |= IXGBE_RMCS_TFCE_PRIORITY;
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

        /* Enable Receive Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);

        if (pfc_en)
                reg |= IXGBE_FCTRL_RPFCE;

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);

        fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if (!(pfc_en & (1 << i))) {
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
                        continue;
                }

                reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
        }

        /* Configure pause time */
        reg = hw->fc.pause_time * 0x00010001;
        for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

        return 0;
}
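
A note on the pause-time write above: hw->fc.pause_time is a 16-bit timer value, and the loop writes only MAX_TRAFFIC_CLASS / 2 FCTTV registers, consistent with each register apparently carrying the timer for two traffic classes. Multiplying by 0x00010001 replicates the 16-bit value into both halves of the 32-bit word. A small self-contained check of that replication:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t pause_time = 0x680; /* example 16-bit pause timer */
        uint32_t reg = (uint32_t)pause_time * 0x00010001u;

        /* The multiplication copies the 16-bit value into both halves. */
        printf("reg = 0x%08x (low = 0x%04x, high = 0x%04x)\n",
               (unsigned)reg, (unsigned)(reg & 0xffff), (unsigned)(reg >> 16));
        return 0;
}
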

/**
 * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use the same statistics counter.
 */
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
        u32 reg = 0;
        u8 i = 0;
        u8 j = 0;

        /* Receive Queues stats setting - 8 queues per statistics reg */
        for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
                reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
                reg |= ((0x1010101) * j);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
                reg |= ((0x1010101) * j);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
        }
        /* Transmit Queues stats setting - 4 queues per statistics reg */
        for (i = 0; i < 8; i++) {
                reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
                reg |= ((0x1010101) * i);
                IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
        }

        return 0;
}
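
The statistics mapping above uses the same replication idea byte-wise: with j below 8 there are no carries, so (0x1010101 * j) places the traffic-class index into each of the four byte-wide queue-mapping fields of an RQSMR/TQSMR register (the four-fields-per-register layout is inferred from the code, not re-checked against the datasheet). A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t tc;

        /* For tc < 256 there are no carries, so every byte lane receives tc. */
        for (tc = 0; tc < 8; tc++)
                printf("tc %u -> mapping word 0x%08x\n",
                       (unsigned)tc, (unsigned)((uint32_t)tc * 0x1010101u));
        return 0;
}
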

/**
 * ixgbe_dcb_hw_config_82598 - Config and enable DCB
 * @hw: pointer to hardware structure
 * @pfc_en: bitmap of enabled PFC traffic classes
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
                              u16 *max, u8 *bwg_id, u8 *prio_type)
{
        ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
        ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
                                               bwg_id, prio_type);
        ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
                                               bwg_id, prio_type);
        ixgbe_dcb_config_pfc_82598(hw, pfc_en);
        ixgbe_dcb_config_tc_stats_82598(hw);

        return 0;
}
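
ixgbe_dcb_hw_config_82598 expects its per-traffic-class refill, max, bwg_id and prio_type arrays to be computed elsewhere in the driver from the DCB bandwidth configuration. The standalone sketch below shows one plausible way such arrays could be derived from bandwidth percentages; it is illustrative only (DEMO_MAX_CREDIT and the proportional split are assumptions, not the driver's actual credit calculation):

#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_TCS    8
#define DEMO_MAX_CREDIT 4095 /* assumed upper bound for a credit field */

int main(void)
{
        /* Hypothetical bandwidth split across 8 traffic classes (percent). */
        const uint8_t bw_pct[DEMO_NUM_TCS] = { 25, 25, 10, 10, 10, 10, 5, 5 };
        uint16_t refill[DEMO_NUM_TCS], max[DEMO_NUM_TCS];
        int i;

        for (i = 0; i < DEMO_NUM_TCS; i++) {
                /* Scale each class's share of the assumed credit pool. */
                refill[i] = (uint16_t)(DEMO_MAX_CREDIT * bw_pct[i] / 100);
                /* Let a class burst up to the full pool in this toy model. */
                max[i] = DEMO_MAX_CREDIT;
                printf("TC%d: refill=%u max=%u\n",
                       i, (unsigned)refill[i], (unsigned)max[i]);
        }
        /*
         * In the driver, arrays like these, plus bwg_id[] and prio_type[],
         * would be handed to ixgbe_dcb_hw_config_82598(hw, pfc_en, refill,
         * max, bwg_id, prio_type).
         */
        return 0;
}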