Linux Kernel 3.7.1
s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice. This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik : For pointing out the improper error condition
15  * check in the s2io_xmit routine and also some
16  * issues in the Tx watchdog function. Also for
17  * patiently answering all those innumerable
18  * questions regarding the 2.6 porting issues.
19  * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20  * macros available only in 2.6 Kernel.
21  * Francois Romieu : For pointing out all code part that were
22  * deprecated and also styling related comments.
23  * Grant Grundler : For helping me get rid of some Architecture
24  * dependent code.
25  * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26  *
27  * Below are the module loadable parameters that are supported by the driver,
28  * with a brief explanation of each variable.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  * This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  * values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  * 2(MSI_X). Default value is '2(MSI_X)'
41  * lro_max_pkts: This parameter defines the maximum number of packets that can
42  * be aggregated as a single large packet
43  * napi: This parameter is used to enable/disable NAPI (polling Rx)
44  * Possible values '1' for enable and '0' for disable. Default is '1'
45  * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
46  * Possible values '1' for enable and '0' for disable. Default is '0'
47  * vlan_tag_strip: This can be used to enable or disable vlan tag stripping.
48  * Possible values '1' for enable, '0' for disable.
49  * Default is '2' - which means disable in promisc mode
50  * and enable in non-promiscuous mode.
51  * multiq: This parameter is used to enable/disable MULTIQUEUE support.
52  * Possible values '1' for enable and '0' for disable. Default is '0'
53  ************************************************************************/
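/*
 * Usage sketch (illustrative, not part of the driver): all of the above
 * are ordinary module parameters, so a typical load with two Tx FIFOs,
 * four Rx rings and MSI-X interrupts would look something like:
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=4 intr_type=2 napi=1
 *
 * Anything left unspecified falls back to the defaults noted above.
 */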
54 
55 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56 
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/mdio.h>
67 #include <linux/skbuff.h>
68 #include <linux/init.h>
69 #include <linux/delay.h>
70 #include <linux/stddef.h>
71 #include <linux/ioctl.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
74 #include <linux/workqueue.h>
75 #include <linux/if_vlan.h>
76 #include <linux/ip.h>
77 #include <linux/tcp.h>
78 #include <linux/uaccess.h>
79 #include <linux/io.h>
80 #include <linux/slab.h>
81 #include <linux/prefetch.h>
82 #include <net/tcp.h>
83 
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86 
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90 
91 #define DRV_VERSION "2.0.26.28"
92 
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96 
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
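/*
 * Aside (illustrative, assuming SIZE_OF_BLOCK is one 4096-byte page as
 * defined in s2io.h): the two entries are indexed by the Rx descriptor
 * mode. A page holds 4096/32 = 128 of the 32-byte 1-buffer RxDs, one of
 * which is given up for the link to the next Rx block (hence 127), and
 * 4096/48 = 85 of the 48-byte 2-buffer RxDs, with the 16 leftover bytes
 * carrying that link.
 */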
99 
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102  int ret;
103 
104  ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105  (GET_RXD_MARKER(rxdp->Control_2) == THE_RXD_MARK));
106 
107  return ret;
108 }
109 
110 /*
111  * Cards with following subsystem_id have a link state indication
112  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113  * The macro below identifies these cards given the subsystem_id.
114  */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
116  (dev_type == XFRAME_I_DEVICE) ? \
117  ((((subid >= 0x600B) && (subid <= 0x600D)) || \
118  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
119 
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121  ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
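/*
 * Usage sketch (illustrative): LINK_IS_UP() is meant to be applied to a
 * value read from the adapter-status register, with the faulty-indicator
 * macro filtering out the subsystem IDs listed above, e.g.:
 *
 *	u64 val64 = readq(&bar0->adapter_status);
 *	u16 subid = sp->pdev->subsystem_device;
 *
 *	if (LINK_IS_UP(val64) &&
 *	    !CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
 *		s2io_link(sp, LINK_UP);
 *
 * where bar0 and sp follow the naming used throughout this file.
 */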
122 
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125  return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127 
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130  "Register test\t(offline)",
131  "Eeprom test\t(offline)",
132  "Link test\t(online)",
133  "RLDRAM test\t(offline)",
134  "BIST Test\t(offline)"
135 };
136 
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138  {"tmac_frms"},
139  {"tmac_data_octets"},
140  {"tmac_drop_frms"},
141  {"tmac_mcst_frms"},
142  {"tmac_bcst_frms"},
143  {"tmac_pause_ctrl_frms"},
144  {"tmac_ttl_octets"},
145  {"tmac_ucst_frms"},
146  {"tmac_nucst_frms"},
147  {"tmac_any_err_frms"},
148  {"tmac_ttl_less_fb_octets"},
149  {"tmac_vld_ip_octets"},
150  {"tmac_vld_ip"},
151  {"tmac_drop_ip"},
152  {"tmac_icmp"},
153  {"tmac_rst_tcp"},
154  {"tmac_tcp"},
155  {"tmac_udp"},
156  {"rmac_vld_frms"},
157  {"rmac_data_octets"},
158  {"rmac_fcs_err_frms"},
159  {"rmac_drop_frms"},
160  {"rmac_vld_mcst_frms"},
161  {"rmac_vld_bcst_frms"},
162  {"rmac_in_rng_len_err_frms"},
163  {"rmac_out_rng_len_err_frms"},
164  {"rmac_long_frms"},
165  {"rmac_pause_ctrl_frms"},
166  {"rmac_unsup_ctrl_frms"},
167  {"rmac_ttl_octets"},
168  {"rmac_accepted_ucst_frms"},
169  {"rmac_accepted_nucst_frms"},
170  {"rmac_discarded_frms"},
171  {"rmac_drop_events"},
172  {"rmac_ttl_less_fb_octets"},
173  {"rmac_ttl_frms"},
174  {"rmac_usized_frms"},
175  {"rmac_osized_frms"},
176  {"rmac_frag_frms"},
177  {"rmac_jabber_frms"},
178  {"rmac_ttl_64_frms"},
179  {"rmac_ttl_65_127_frms"},
180  {"rmac_ttl_128_255_frms"},
181  {"rmac_ttl_256_511_frms"},
182  {"rmac_ttl_512_1023_frms"},
183  {"rmac_ttl_1024_1518_frms"},
184  {"rmac_ip"},
185  {"rmac_ip_octets"},
186  {"rmac_hdr_err_ip"},
187  {"rmac_drop_ip"},
188  {"rmac_icmp"},
189  {"rmac_tcp"},
190  {"rmac_udp"},
191  {"rmac_err_drp_udp"},
192  {"rmac_xgmii_err_sym"},
193  {"rmac_frms_q0"},
194  {"rmac_frms_q1"},
195  {"rmac_frms_q2"},
196  {"rmac_frms_q3"},
197  {"rmac_frms_q4"},
198  {"rmac_frms_q5"},
199  {"rmac_frms_q6"},
200  {"rmac_frms_q7"},
201  {"rmac_full_q0"},
202  {"rmac_full_q1"},
203  {"rmac_full_q2"},
204  {"rmac_full_q3"},
205  {"rmac_full_q4"},
206  {"rmac_full_q5"},
207  {"rmac_full_q6"},
208  {"rmac_full_q7"},
209  {"rmac_pause_cnt"},
210  {"rmac_xgmii_data_err_cnt"},
211  {"rmac_xgmii_ctrl_err_cnt"},
212  {"rmac_accepted_ip"},
213  {"rmac_err_tcp"},
214  {"rd_req_cnt"},
215  {"new_rd_req_cnt"},
216  {"new_rd_req_rtry_cnt"},
217  {"rd_rtry_cnt"},
218  {"wr_rtry_rd_ack_cnt"},
219  {"wr_req_cnt"},
220  {"new_wr_req_cnt"},
221  {"new_wr_req_rtry_cnt"},
222  {"wr_rtry_cnt"},
223  {"wr_disc_cnt"},
224  {"rd_rtry_wr_ack_cnt"},
225  {"txp_wr_cnt"},
226  {"txd_rd_cnt"},
227  {"txd_wr_cnt"},
228  {"rxd_rd_cnt"},
229  {"rxd_wr_cnt"},
230  {"txf_rd_cnt"},
231  {"rxf_wr_cnt"}
232 };
233 
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235  {"rmac_ttl_1519_4095_frms"},
236  {"rmac_ttl_4096_8191_frms"},
237  {"rmac_ttl_8192_max_frms"},
238  {"rmac_ttl_gt_max_frms"},
239  {"rmac_osized_alt_frms"},
240  {"rmac_jabber_alt_frms"},
241  {"rmac_gt_max_alt_frms"},
242  {"rmac_vlan_frms"},
243  {"rmac_len_discard"},
244  {"rmac_fcs_discard"},
245  {"rmac_pf_discard"},
246  {"rmac_da_discard"},
247  {"rmac_red_discard"},
248  {"rmac_rts_discard"},
249  {"rmac_ingm_full_discard"},
250  {"link_fault_cnt"}
251 };
252 
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254  {"\n DRIVER STATISTICS"},
255  {"single_bit_ecc_errs"},
256  {"double_bit_ecc_errs"},
257  {"parity_err_cnt"},
258  {"serious_err_cnt"},
259  {"soft_reset_cnt"},
260  {"fifo_full_cnt"},
261  {"ring_0_full_cnt"},
262  {"ring_1_full_cnt"},
263  {"ring_2_full_cnt"},
264  {"ring_3_full_cnt"},
265  {"ring_4_full_cnt"},
266  {"ring_5_full_cnt"},
267  {"ring_6_full_cnt"},
268  {"ring_7_full_cnt"},
269  {"alarm_transceiver_temp_high"},
270  {"alarm_transceiver_temp_low"},
271  {"alarm_laser_bias_current_high"},
272  {"alarm_laser_bias_current_low"},
273  {"alarm_laser_output_power_high"},
274  {"alarm_laser_output_power_low"},
275  {"warn_transceiver_temp_high"},
276  {"warn_transceiver_temp_low"},
277  {"warn_laser_bias_current_high"},
278  {"warn_laser_bias_current_low"},
279  {"warn_laser_output_power_high"},
280  {"warn_laser_output_power_low"},
281  {"lro_aggregated_pkts"},
282  {"lro_flush_both_count"},
283  {"lro_out_of_sequence_pkts"},
284  {"lro_flush_due_to_max_pkts"},
285  {"lro_avg_aggr_pkts"},
286  {"mem_alloc_fail_cnt"},
287  {"pci_map_fail_cnt"},
288  {"watchdog_timer_cnt"},
289  {"mem_allocated"},
290  {"mem_freed"},
291  {"link_up_cnt"},
292  {"link_down_cnt"},
293  {"link_up_time"},
294  {"link_down_time"},
295  {"tx_tcode_buf_abort_cnt"},
296  {"tx_tcode_desc_abort_cnt"},
297  {"tx_tcode_parity_err_cnt"},
298  {"tx_tcode_link_loss_cnt"},
299  {"tx_tcode_list_proc_err_cnt"},
300  {"rx_tcode_parity_err_cnt"},
301  {"rx_tcode_abort_cnt"},
302  {"rx_tcode_parity_abort_cnt"},
303  {"rx_tcode_rda_fail_cnt"},
304  {"rx_tcode_unkn_prot_cnt"},
305  {"rx_tcode_fcs_err_cnt"},
306  {"rx_tcode_buf_size_err_cnt"},
307  {"rx_tcode_rxd_corrupt_cnt"},
308  {"rx_tcode_unkn_err_cnt"},
309  {"tda_err_cnt"},
310  {"pfc_err_cnt"},
311  {"pcc_err_cnt"},
312  {"tti_err_cnt"},
313  {"tpa_err_cnt"},
314  {"sm_err_cnt"},
315  {"lso_err_cnt"},
316  {"mac_tmac_err_cnt"},
317  {"mac_rmac_err_cnt"},
318  {"xgxs_txgxs_err_cnt"},
319  {"xgxs_rxgxs_err_cnt"},
320  {"rc_err_cnt"},
321  {"prc_pcix_err_cnt"},
322  {"rpa_err_cnt"},
323  {"rda_err_cnt"},
324  {"rti_err_cnt"},
325  {"mc_err_cnt"}
326 };
327 
328 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
331 
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334 
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337 
338 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
340 
341 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
342  init_timer(&timer); \
343  timer.function = handle; \
344  timer.data = (unsigned long)arg; \
345  mod_timer(&timer, (jiffies + exp)) \
346 
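/*
 * Usage sketch (illustrative): the macro expands to several statements,
 * so it is invoked like a statement from the setup path, e.g. arming an
 * alarm timer to fire every half second:
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 *
 * Note the expansion is not wrapped in do { } while (0), so it must not
 * be used as the body of an unbraced if/else.
 */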
347 /* copy mac addr to def_mac_addr array */
348 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
349 {
350  sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
351  sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
352  sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
353  sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
354  sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
355  sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356 }
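/*
 * Worked example (illustrative): the 48-bit MAC address lives in the low
 * six bytes of the u64, most significant octet first, so for
 * mac_addr == 0x0050569A0001ULL the assignments above produce
 *
 *	mac_addr[0..5] = { 0x00, 0x50, 0x56, 0x9A, 0x00, 0x01 }
 *
 * i.e. the address 00:50:56:9a:00:01.
 */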
357 
358 /*
359  * Constants to be programmed into the Xena's registers, to configure
360  * the XAUI.
361  */
362 
363 #define END_SIGN 0x0
364 static const u64 herc_act_dtx_cfg[] = {
365  /* Set address */
366  0x8000051536750000ULL, 0x80000515367500E0ULL,
367  /* Write data */
368  0x8000051536750004ULL, 0x80000515367500E4ULL,
369  /* Set address */
370  0x80010515003F0000ULL, 0x80010515003F00E0ULL,
371  /* Write data */
372  0x80010515003F0004ULL, 0x80010515003F00E4ULL,
373  /* Set address */
374  0x801205150D440000ULL, 0x801205150D4400E0ULL,
375  /* Write data */
376  0x801205150D440004ULL, 0x801205150D4400E4ULL,
377  /* Set address */
378  0x80020515F2100000ULL, 0x80020515F21000E0ULL,
379  /* Write data */
380  0x80020515F2100004ULL, 0x80020515F21000E4ULL,
381  /* Done */
382  END_SIGN
383 };
384 
385 static const u64 xena_dtx_cfg[] = {
386  /* Set address */
387  0x8000051500000000ULL, 0x80000515000000E0ULL,
388  /* Write data */
389  0x80000515D9350004ULL, 0x80000515D93500E4ULL,
390  /* Set address */
391  0x8001051500000000ULL, 0x80010515000000E0ULL,
392  /* Write data */
393  0x80010515001E0004ULL, 0x80010515001E00E4ULL,
394  /* Set address */
395  0x8002051500000000ULL, 0x80020515000000E0ULL,
396  /* Write data */
397  0x80020515F2100004ULL, 0x80020515F21000E4ULL,
398  END_SIGN
399 };
400 
401 /*
402  * Constants for Fixing the MacAddress problem seen mostly on
403  * Alpha machines.
404  */
405 static const u64 fix_mac[] = {
406  0x0060000000000000ULL, 0x0060600000000000ULL,
407  0x0040600000000000ULL, 0x0000600000000000ULL,
408  0x0020600000000000ULL, 0x0060600000000000ULL,
409  0x0020600000000000ULL, 0x0060600000000000ULL,
410  0x0020600000000000ULL, 0x0060600000000000ULL,
411  0x0020600000000000ULL, 0x0060600000000000ULL,
412  0x0020600000000000ULL, 0x0060600000000000ULL,
413  0x0020600000000000ULL, 0x0060600000000000ULL,
414  0x0020600000000000ULL, 0x0060600000000000ULL,
415  0x0020600000000000ULL, 0x0060600000000000ULL,
416  0x0020600000000000ULL, 0x0060600000000000ULL,
417  0x0020600000000000ULL, 0x0060600000000000ULL,
418  0x0020600000000000ULL, 0x0000600000000000ULL,
419  0x0040600000000000ULL, 0x0060600000000000ULL,
420  END_SIGN
421 };
422 
423 MODULE_LICENSE("GPL");
424 MODULE_VERSION(DRV_VERSION);
425 
426 
427 /* Module Loadable parameters. */
428 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
429 S2IO_PARM_INT(rx_ring_num, 1);
430 S2IO_PARM_INT(multiq, 0);
431 S2IO_PARM_INT(rx_ring_mode, 1);
432 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
433 S2IO_PARM_INT(rmac_pause_time, 0x100);
434 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
435 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
436 S2IO_PARM_INT(shared_splits, 0);
437 S2IO_PARM_INT(tmac_util_period, 5);
438 S2IO_PARM_INT(rmac_util_period, 5);
439 S2IO_PARM_INT(l3l4hdr_size, 128);
440 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
441 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
442 /* Frequency of Rx desc syncs expressed as power of 2 */
443 S2IO_PARM_INT(rxsync_frequency, 3);
444 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
445 S2IO_PARM_INT(intr_type, 2);
446 /* Large receive offload feature */
447 
448 /* Max pkts to be aggregated by LRO at one time. If not specified,
449  * aggregation happens until we hit max IP pkt size(64K)
450  */
451 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
452 S2IO_PARM_INT(indicate_max_pkts, 0);
453 
454 S2IO_PARM_INT(napi, 1);
455 S2IO_PARM_INT(ufo, 0);
456 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
457 
458 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
459 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
460 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
461 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
462 static unsigned int rts_frm_len[MAX_RX_RINGS] =
463 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
464 
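/*
 * Aside (illustrative): the [first ... last] = value form above is GCC's
 * designated range initializer extension, e.g.
 *
 *	static unsigned int demo[4] = { 9, [1 ... 3] = 7 };
 *
 * yields {9, 7, 7, 7}; here it fills every FIFO/ring slot with a default
 * while letting index 0 differ.
 */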
465 module_param_array(tx_fifo_len, uint, NULL, 0);
466 module_param_array(rx_ring_sz, uint, NULL, 0);
467 module_param_array(rts_frm_len, uint, NULL, 0);
468 
469 /*
470  * S2IO device table.
471  * This table lists all the devices that this driver supports.
472  */
473 static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
474  {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
475  PCI_ANY_ID, PCI_ANY_ID},
476  {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
477  PCI_ANY_ID, PCI_ANY_ID},
478  {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
479  PCI_ANY_ID, PCI_ANY_ID},
480  {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
481  PCI_ANY_ID, PCI_ANY_ID},
482  {0,}
483 };
484 
485 MODULE_DEVICE_TABLE(pci, s2io_tbl);
486 
487 static const struct pci_error_handlers s2io_err_handler = {
488  .error_detected = s2io_io_error_detected,
489  .slot_reset = s2io_io_slot_reset,
490  .resume = s2io_io_resume,
491 };
492 
493 static struct pci_driver s2io_driver = {
494  .name = "S2IO",
495  .id_table = s2io_tbl,
496  .probe = s2io_init_nic,
497  .remove = __devexit_p(s2io_rem_nic),
498  .err_handler = &s2io_err_handler,
499 };
500 
501 /* A simplifier macro used both by init and free shared_mem Fns(). */
502 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
503 
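/*
 * Worked example (illustrative): TXD_MEM_PAGE_CNT() is a ceiling
 * division. If a page holds 8 descriptor lists (lst_per_page == 8), a
 * FIFO of 100 lists needs
 *
 *	TXD_MEM_PAGE_CNT(100, 8) == (100 + 8 - 1) / 8 == 13
 *
 * pages, the last one only partially used.
 */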
504 /* netqueue manipulation helper functions */
505 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
506 {
507  if (!sp->config.multiq) {
508  int i;
509 
510  for (i = 0; i < sp->config.tx_fifo_num; i++)
511  sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
512  }
513  netif_tx_stop_all_queues(sp->dev);
514 }
515 
516 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
517 {
518  if (!sp->config.multiq)
519  sp->mac_control.fifos[fifo_no].queue_state =
520  FIFO_QUEUE_STOP;
521 
522  netif_tx_stop_all_queues(sp->dev);
523 }
524 
525 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
526 {
527  if (!sp->config.multiq) {
528  int i;
529 
530  for (i = 0; i < sp->config.tx_fifo_num; i++)
531  sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
532  }
533  netif_tx_start_all_queues(sp->dev);
534 }
535 
536 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
537 {
538  if (!sp->config.multiq)
539  sp->mac_control.fifos[fifo_no].queue_state =
540  FIFO_QUEUE_START;
541 
542  netif_tx_start_all_queues(sp->dev);
543 }
544 
545 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
546 {
547  if (!sp->config.multiq) {
548  int i;
549 
550  for (i = 0; i < sp->config.tx_fifo_num; i++)
551  sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
552  }
553  netif_tx_wake_all_queues(sp->dev);
554 }
555 
556 static inline void s2io_wake_tx_queue(
557  struct fifo_info *fifo, int cnt, u8 multiq)
558 {
559 
560  if (multiq) {
561  if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
562  netif_wake_subqueue(fifo->dev, fifo->fifo_no);
563  } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
564  if (netif_queue_stopped(fifo->dev)) {
565  fifo->queue_state = FIFO_QUEUE_START;
566  netif_wake_queue(fifo->dev);
567  }
568  }
569 }
570 
571 /**
572  * init_shared_mem - Allocation and Initialization of Memory
573  * @nic: Device private variable.
574  * Description: The function allocates all the memory areas shared
575  * between the NIC and the driver. This includes Tx descriptors,
576  * Rx descriptors and the stats block.
577  */
578 
579 static int init_shared_mem(struct s2io_nic *nic)
580 {
581  u32 size;
582  void *tmp_v_addr, *tmp_v_addr_next;
583  dma_addr_t tmp_p_addr, tmp_p_addr_next;
584  struct RxD_block *pre_rxd_blk = NULL;
585  int i, j, blk_cnt;
586  int lst_size, lst_per_page;
587  struct net_device *dev = nic->dev;
588  unsigned long tmp;
589  struct buffAdd *ba;
590  struct config_param *config = &nic->config;
591  struct mac_info *mac_control = &nic->mac_control;
592  unsigned long long mem_allocated = 0;
593 
594  /* Allocation and initialization of TXDLs in FIFOs */
595  size = 0;
596  for (i = 0; i < config->tx_fifo_num; i++) {
597  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
598 
599  size += tx_cfg->fifo_len;
600  }
601  if (size > MAX_AVAILABLE_TXDS) {
603  "Too many TxDs requested: %d, max supported: %d\n",
604  size, MAX_AVAILABLE_TXDS);
605  return -EINVAL;
606  }
607 
608  size = 0;
609  for (i = 0; i < config->tx_fifo_num; i++) {
610  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
611 
612  size = tx_cfg->fifo_len;
613  /*
614  * Legal values are from 2 to 8192
615  */
616  if (size < 2) {
617  DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
618  "Valid lengths are 2 through 8192\n",
619  i, size);
620  return -EINVAL;
621  }
622  }
623 
624  lst_size = (sizeof(struct TxD) * config->max_txds);
625  lst_per_page = PAGE_SIZE / lst_size;
626 
627  for (i = 0; i < config->tx_fifo_num; i++) {
628  struct fifo_info *fifo = &mac_control->fifos[i];
629  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
630  int fifo_len = tx_cfg->fifo_len;
631  int list_holder_size = fifo_len * sizeof(struct list_info_hold);
632 
633  fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
634  if (!fifo->list_info) {
635  DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
636  return -ENOMEM;
637  }
638  mem_allocated += list_holder_size;
639  }
640  for (i = 0; i < config->tx_fifo_num; i++) {
641  int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
642  lst_per_page);
643  struct fifo_info *fifo = &mac_control->fifos[i];
644  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
645 
646  fifo->tx_curr_put_info.offset = 0;
647  fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
648  fifo->tx_curr_get_info.offset = 0;
649  fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
650  fifo->fifo_no = i;
651  fifo->nic = nic;
652  fifo->max_txds = MAX_SKB_FRAGS + 2;
653  fifo->dev = dev;
654 
655  for (j = 0; j < page_num; j++) {
656  int k = 0;
657  dma_addr_t tmp_p;
658  void *tmp_v;
659  tmp_v = pci_alloc_consistent(nic->pdev,
660  PAGE_SIZE, &tmp_p);
661  if (!tmp_v) {
663  "pci_alloc_consistent failed for TxDL\n");
664  return -ENOMEM;
665  }
666  /* If we got a zero DMA address (can happen on
667  * certain platforms like PPC), reallocate.
668  * Store the virtual address of the page we don't want,
669  * to be freed later.
670  */
671  if (!tmp_p) {
672  mac_control->zerodma_virt_addr = tmp_v;
674  "%s: Zero DMA address for TxDL. "
675  "Virtual address %p\n",
676  dev->name, tmp_v);
677  tmp_v = pci_alloc_consistent(nic->pdev,
678  PAGE_SIZE, &tmp_p);
679  if (!tmp_v) {
681  "pci_alloc_consistent failed for TxDL\n");
682  return -ENOMEM;
683  }
684  mem_allocated += PAGE_SIZE;
685  }
686  while (k < lst_per_page) {
687  int l = (j * lst_per_page) + k;
688  if (l == tx_cfg->fifo_len)
689  break;
690  fifo->list_info[l].list_virt_addr =
691  tmp_v + (k * lst_size);
692  fifo->list_info[l].list_phy_addr =
693  tmp_p + (k * lst_size);
694  k++;
695  }
696  }
697  }
698 
699  for (i = 0; i < config->tx_fifo_num; i++) {
700  struct fifo_info *fifo = &mac_control->fifos[i];
701  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
702 
703  size = tx_cfg->fifo_len;
704  fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
705  if (!fifo->ufo_in_band_v)
706  return -ENOMEM;
707  mem_allocated += (size * sizeof(u64));
708  }
709 
710  /* Allocation and initialization of RXDs in Rings */
711  size = 0;
712  for (i = 0; i < config->rx_ring_num; i++) {
713  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
714  struct ring_info *ring = &mac_control->rings[i];
715 
716  if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
717  DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
718  "multiple of RxDs per Block\n",
719  dev->name, i);
720  return FAILURE;
721  }
722  size += rx_cfg->num_rxd;
723  ring->block_count = rx_cfg->num_rxd /
724  (rxd_count[nic->rxd_mode] + 1);
725  ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
726  }
727  if (nic->rxd_mode == RXD_MODE_1)
728  size = (size * (sizeof(struct RxD1)));
729  else
730  size = (size * (sizeof(struct RxD3)));
731 
732  for (i = 0; i < config->rx_ring_num; i++) {
733  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
734  struct ring_info *ring = &mac_control->rings[i];
735 
736  ring->rx_curr_get_info.block_index = 0;
737  ring->rx_curr_get_info.offset = 0;
738  ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
739  ring->rx_curr_put_info.block_index = 0;
740  ring->rx_curr_put_info.offset = 0;
741  ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
742  ring->nic = nic;
743  ring->ring_no = i;
744 
745  blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
746  /* Allocating all the Rx blocks */
747  for (j = 0; j < blk_cnt; j++) {
748  struct rx_block_info *rx_blocks;
749  int l;
750 
751  rx_blocks = &ring->rx_blocks[j];
752  size = SIZE_OF_BLOCK; /* size is always page size */
753  tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
754  &tmp_p_addr);
755  if (tmp_v_addr == NULL) {
756  /*
757  * In case of failure, free_shared_mem()
758  * is called, which should free any
759  * memory that was alloced till the
760  * failure happened.
761  */
762  rx_blocks->block_virt_addr = tmp_v_addr;
763  return -ENOMEM;
764  }
765  mem_allocated += size;
766  memset(tmp_v_addr, 0, size);
767 
768  size = sizeof(struct rxd_info) *
769  rxd_count[nic->rxd_mode];
770  rx_blocks->block_virt_addr = tmp_v_addr;
771  rx_blocks->block_dma_addr = tmp_p_addr;
772  rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
773  if (!rx_blocks->rxds)
774  return -ENOMEM;
775  mem_allocated += size;
776  for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
777  rx_blocks->rxds[l].virt_addr =
778  rx_blocks->block_virt_addr +
779  (rxd_size[nic->rxd_mode] * l);
780  rx_blocks->rxds[l].dma_addr =
781  rx_blocks->block_dma_addr +
782  (rxd_size[nic->rxd_mode] * l);
783  }
784  }
785  /* Interlinking all Rx Blocks */
786  for (j = 0; j < blk_cnt; j++) {
787  int next = (j + 1) % blk_cnt;
788  tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
789  tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
790  tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
791  tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
792 
793  pre_rxd_blk = tmp_v_addr;
794  pre_rxd_blk->reserved_2_pNext_RxD_block =
795  (unsigned long)tmp_v_addr_next;
796  pre_rxd_blk->pNext_RxD_Blk_physical =
797  (u64)tmp_p_addr_next;
798  }
799  }
800  if (nic->rxd_mode == RXD_MODE_3B) {
801  /*
802  * Allocation of Storages for buffer addresses in 2BUFF mode
803  * and the buffers as well.
804  */
805  for (i = 0; i < config->rx_ring_num; i++) {
806  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
807  struct ring_info *ring = &mac_control->rings[i];
808 
809  blk_cnt = rx_cfg->num_rxd /
810  (rxd_count[nic->rxd_mode] + 1);
811  size = sizeof(struct buffAdd *) * blk_cnt;
812  ring->ba = kmalloc(size, GFP_KERNEL);
813  if (!ring->ba)
814  return -ENOMEM;
815  mem_allocated += size;
816  for (j = 0; j < blk_cnt; j++) {
817  int k = 0;
818 
819  size = sizeof(struct buffAdd) *
820  (rxd_count[nic->rxd_mode] + 1);
821  ring->ba[j] = kmalloc(size, GFP_KERNEL);
822  if (!ring->ba[j])
823  return -ENOMEM;
824  mem_allocated += size;
825  while (k != rxd_count[nic->rxd_mode]) {
826  ba = &ring->ba[j][k];
827  size = BUF0_LEN + ALIGN_SIZE;
828  ba->ba_0_org = kmalloc(size, GFP_KERNEL);
829  if (!ba->ba_0_org)
830  return -ENOMEM;
831  mem_allocated += size;
832  tmp = (unsigned long)ba->ba_0_org;
833  tmp += ALIGN_SIZE;
834  tmp &= ~((unsigned long)ALIGN_SIZE);
835  ba->ba_0 = (void *)tmp;
836 
837  size = BUF1_LEN + ALIGN_SIZE;
838  ba->ba_1_org = kmalloc(size, GFP_KERNEL);
839  if (!ba->ba_1_org)
840  return -ENOMEM;
841  mem_allocated += size;
842  tmp = (unsigned long)ba->ba_1_org;
843  tmp += ALIGN_SIZE;
844  tmp &= ~((unsigned long)ALIGN_SIZE);
845  ba->ba_1 = (void *)tmp;
846  k++;
847  }
848  }
849  }
850  }
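/*
 * Note on the alignment idiom above (assuming ALIGN_SIZE is of the form
 * 2^n - 1, as defined in s2io.h): over-allocating by ALIGN_SIZE and then
 *
 *	tmp += ALIGN_SIZE;
 *	tmp &= ~((unsigned long)ALIGN_SIZE);
 *
 * rounds the pointer up to the next (ALIGN_SIZE + 1)-byte boundary, so
 * ba_0/ba_1 are aligned views of ba_0_org/ba_1_org, and the *_org
 * pointers are what free_shared_mem() later passes to kfree().
 */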
851 
852  /* Allocation and initialization of Statistics block */
853  size = sizeof(struct stat_block);
854  mac_control->stats_mem =
855  pci_alloc_consistent(nic->pdev, size,
856  &mac_control->stats_mem_phy);
857 
858  if (!mac_control->stats_mem) {
859  /*
860  * In case of failure, free_shared_mem() is called, which
861  * should free any memory that was alloced till the
862  * failure happened.
863  */
864  return -ENOMEM;
865  }
866  mem_allocated += size;
867  mac_control->stats_mem_sz = size;
868 
869  tmp_v_addr = mac_control->stats_mem;
870  mac_control->stats_info = tmp_v_addr;
871  memset(tmp_v_addr, 0, size);
872  DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
873  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
874  mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
875  return SUCCESS;
876 }
877 
878 /**
879  * free_shared_mem - Free the allocated Memory
880  * @nic: Device private variable.
881  * Description: This function is to free all memory locations allocated by
882  * the init_shared_mem() function and return it to the kernel.
883  */
884 
885 static void free_shared_mem(struct s2io_nic *nic)
886 {
887  int i, j, blk_cnt, size;
888  void *tmp_v_addr;
889  dma_addr_t tmp_p_addr;
890  int lst_size, lst_per_page;
891  struct net_device *dev;
892  int page_num = 0;
893  struct config_param *config;
894  struct mac_info *mac_control;
895  struct stat_block *stats;
896  struct swStat *swstats;
897 
898  if (!nic)
899  return;
900 
901  dev = nic->dev;
902 
903  config = &nic->config;
904  mac_control = &nic->mac_control;
905  stats = mac_control->stats_info;
906  swstats = &stats->sw_stat;
907 
908  lst_size = sizeof(struct TxD) * config->max_txds;
909  lst_per_page = PAGE_SIZE / lst_size;
910 
911  for (i = 0; i < config->tx_fifo_num; i++) {
912  struct fifo_info *fifo = &mac_control->fifos[i];
913  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
914 
915  page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
916  for (j = 0; j < page_num; j++) {
917  int mem_blks = (j * lst_per_page);
918  struct list_info_hold *fli;
919 
920  if (!fifo->list_info)
921  return;
922 
923  fli = &fifo->list_info[mem_blks];
924  if (!fli->list_virt_addr)
925  break;
926  pci_free_consistent(nic->pdev, PAGE_SIZE,
927  fli->list_virt_addr,
928  fli->list_phy_addr);
929  swstats->mem_freed += PAGE_SIZE;
930  }
931  /* If we got a zero DMA address during allocation,
932  * free the page now
933  */
934  if (mac_control->zerodma_virt_addr) {
935  pci_free_consistent(nic->pdev, PAGE_SIZE,
936  mac_control->zerodma_virt_addr,
937  (dma_addr_t)0);
938  DBG_PRINT(INIT_DBG,
939  "%s: Freeing TxDL with zero DMA address. "
940  "Virtual address %p\n",
941  dev->name, mac_control->zerodma_virt_addr);
942  swstats->mem_freed += PAGE_SIZE;
943  }
944  kfree(fifo->list_info);
945  swstats->mem_freed += tx_cfg->fifo_len *
946  sizeof(struct list_info_hold);
947  }
948 
949  size = SIZE_OF_BLOCK;
950  for (i = 0; i < config->rx_ring_num; i++) {
951  struct ring_info *ring = &mac_control->rings[i];
952 
953  blk_cnt = ring->block_count;
954  for (j = 0; j < blk_cnt; j++) {
955  tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
956  tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
957  if (tmp_v_addr == NULL)
958  break;
959  pci_free_consistent(nic->pdev, size,
960  tmp_v_addr, tmp_p_addr);
961  swstats->mem_freed += size;
962  kfree(ring->rx_blocks[j].rxds);
963  swstats->mem_freed += sizeof(struct rxd_info) *
964  rxd_count[nic->rxd_mode];
965  }
966  }
967 
968  if (nic->rxd_mode == RXD_MODE_3B) {
969  /* Freeing buffer storage addresses in 2BUFF mode. */
970  for (i = 0; i < config->rx_ring_num; i++) {
971  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
972  struct ring_info *ring = &mac_control->rings[i];
973 
974  blk_cnt = rx_cfg->num_rxd /
975  (rxd_count[nic->rxd_mode] + 1);
976  for (j = 0; j < blk_cnt; j++) {
977  int k = 0;
978  if (!ring->ba[j])
979  continue;
980  while (k != rxd_count[nic->rxd_mode]) {
981  struct buffAdd *ba = &ring->ba[j][k];
982  kfree(ba->ba_0_org);
983  swstats->mem_freed +=
984  BUF0_LEN + ALIGN_SIZE;
985  kfree(ba->ba_1_org);
986  swstats->mem_freed +=
987  BUF1_LEN + ALIGN_SIZE;
988  k++;
989  }
990  kfree(ring->ba[j]);
991  swstats->mem_freed += sizeof(struct buffAdd) *
992  (rxd_count[nic->rxd_mode] + 1);
993  }
994  kfree(ring->ba);
995  swstats->mem_freed += sizeof(struct buffAdd *) *
996  blk_cnt;
997  }
998  }
999 
1000  for (i = 0; i < nic->config.tx_fifo_num; i++) {
1001  struct fifo_info *fifo = &mac_control->fifos[i];
1002  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1003 
1004  if (fifo->ufo_in_band_v) {
1005  swstats->mem_freed += tx_cfg->fifo_len *
1006  sizeof(u64);
1007  kfree(fifo->ufo_in_band_v);
1008  }
1009  }
1010 
1011  if (mac_control->stats_mem) {
1012  swstats->mem_freed += mac_control->stats_mem_sz;
1013  pci_free_consistent(nic->pdev,
1014  mac_control->stats_mem_sz,
1015  mac_control->stats_mem,
1016  mac_control->stats_mem_phy);
1017  }
1018 }
1019 
1020 /**
1021  * s2io_verify_pci_mode -
1022  */
1023 
1024 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1025 {
1026  struct XENA_dev_config __iomem *bar0 = nic->bar0;
1027  register u64 val64 = 0;
1028  int mode;
1029 
1030  val64 = readq(&bar0->pci_mode);
1031  mode = (u8)GET_PCI_MODE(val64);
1032 
1033  if (val64 & PCI_MODE_UNKNOWN_MODE)
1034  return -1; /* Unknown PCI mode */
1035  return mode;
1036 }
1037 
1038 #define NEC_VENID 0x1033
1039 #define NEC_DEVID 0x0125
1040 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1041 {
1042  struct pci_dev *tdev = NULL;
1043  while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1044  if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1045  if (tdev->bus == s2io_pdev->bus->parent) {
1046  pci_dev_put(tdev);
1047  return 1;
1048  }
1049  }
1050  }
1051  return 0;
1052 }
1053 
1054 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1055 /**
1056  * s2io_print_pci_mode -
1057  */
1058 static int s2io_print_pci_mode(struct s2io_nic *nic)
1059 {
1060  struct XENA_dev_config __iomem *bar0 = nic->bar0;
1061  register u64 val64 = 0;
1062  int mode;
1063  struct config_param *config = &nic->config;
1064  const char *pcimode;
1065 
1066  val64 = readq(&bar0->pci_mode);
1067  mode = (u8)GET_PCI_MODE(val64);
1068 
1069  if (val64 & PCI_MODE_UNKNOWN_MODE)
1070  return -1; /* Unknown PCI mode */
1071 
1072  config->bus_speed = bus_speed[mode];
1073 
1074  if (s2io_on_nec_bridge(nic->pdev)) {
1075  DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1076  nic->dev->name);
1077  return mode;
1078  }
1079 
1080  switch (mode) {
1081  case PCI_MODE_PCI_33:
1082  pcimode = "33MHz PCI bus";
1083  break;
1084  case PCI_MODE_PCI_66:
1085  pcimode = "66MHz PCI bus";
1086  break;
1087  case PCI_MODE_PCIX_M1_66:
1088  pcimode = "66MHz PCIX(M1) bus";
1089  break;
1090  case PCI_MODE_PCIX_M1_100:
1091  pcimode = "100MHz PCIX(M1) bus";
1092  break;
1093  case PCI_MODE_PCIX_M1_133:
1094  pcimode = "133MHz PCIX(M1) bus";
1095  break;
1096  case PCI_MODE_PCIX_M2_66:
1097  pcimode = "133MHz PCIX(M2) bus";
1098  break;
1099  case PCI_MODE_PCIX_M2_100:
1100  pcimode = "200MHz PCIX(M2) bus";
1101  break;
1102  case PCI_MODE_PCIX_M2_133:
1103  pcimode = "266MHz PCIX(M2) bus";
1104  break;
1105  default:
1106  pcimode = "unsupported bus!";
1107  mode = -1;
1108  }
1109 
1110  DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1111  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1112 
1113  return mode;
1114 }
1115 
1116 /**
1117  * init_tti - Initialization transmit traffic interrupt scheme
1118  * @nic: device private variable
1119  * @link: link status (UP/DOWN) used to enable/disable continuous
1120  * transmit interrupts
1121  * Description: The function configures transmit traffic interrupts
1122  * Return Value:  SUCCESS on success and
1123  * '-1' on failure
1124  */
1125 
1126 static int init_tti(struct s2io_nic *nic, int link)
1127 {
1128  struct XENA_dev_config __iomem *bar0 = nic->bar0;
1129  register u64 val64 = 0;
1130  int i;
1131  struct config_param *config = &nic->config;
1132 
1133  for (i = 0; i < config->tx_fifo_num; i++) {
1134  /*
1135  * TTI Initialization. Default Tx timer gets us about
1136  * 250 interrupts per sec. Continuous interrupts are enabled
1137  * by default.
1138  */
1139  if (nic->device_type == XFRAME_II_DEVICE) {
1140  int count = (nic->config.bus_speed * 125)/2;
1141  val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1142  } else
1143  val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1144 
1145  val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1146  TTI_DATA1_MEM_TX_URNG_B(0x10) |
1147  TTI_DATA1_MEM_TX_URNG_C(0x30) |
1148  TTI_DATA1_MEM_TX_TIMER_AC_EN;
1149  if (i == 0)
1150  if (use_continuous_tx_intrs && (link == LINK_UP))
1151  val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1152  writeq(val64, &bar0->tti_data1_mem);
1153 
1154  if (nic->config.intr_type == MSI_X) {
1155  val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1156  TTI_DATA2_MEM_TX_UFC_B(0x100) |
1157  TTI_DATA2_MEM_TX_UFC_C(0x200) |
1158  TTI_DATA2_MEM_TX_UFC_D(0x300);
1159  } else {
1160  if ((nic->config.tx_steering_type ==
1161  TX_DEFAULT_STEERING) &&
1162  (config->tx_fifo_num > 1) &&
1163  (i >= nic->udp_fifo_idx) &&
1164  (i < (nic->udp_fifo_idx +
1165  nic->total_udp_fifos)))
1166  val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1167  TTI_DATA2_MEM_TX_UFC_B(0x80) |
1168  TTI_DATA2_MEM_TX_UFC_C(0x100) |
1169  TTI_DATA2_MEM_TX_UFC_D(0x120);
1170  else
1171  val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1172  TTI_DATA2_MEM_TX_UFC_B(0x20) |
1173  TTI_DATA2_MEM_TX_UFC_C(0x40) |
1174  TTI_DATA2_MEM_TX_UFC_D(0x80);
1175  }
1176 
1177  writeq(val64, &bar0->tti_data2_mem);
1178 
1179  val64 = TTI_CMD_MEM_WE |
1180  TTI_CMD_MEM_STROBE_NEW_CMD |
1181  TTI_CMD_MEM_OFFSET(i);
1182  writeq(val64, &bar0->tti_command_mem);
1183 
1184  if (wait_for_cmd_complete(&bar0->tti_command_mem,
1185  TTI_CMD_MEM_STROBE_NEW_CMD,
1186  S2IO_BIT_RESET) != SUCCESS)
1187  return FAILURE;
1188  }
1189 
1190  return SUCCESS;
1191 }
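/*
 * Worked example (illustrative): on an Xframe II sitting on a 266 MHz
 * bus, the timer value computed above is
 *
 *	count = (266 * 125) / 2 = 16625
 *
 * which is what yields the roughly 250 Tx interrupts per second noted
 * in the comment; Xframe I always uses the fixed value 0x2078.
 */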
1192 
1193 /**
1194  * init_nic - Initialization of hardware
1195  * @nic: device private variable
1196  * Description: The function sequentially configures every block
1197  * of the H/W from their reset values.
1198  * Return Value:  SUCCESS on success and
1199  * '-1' on failure (endian settings incorrect).
1200  */
1201 
1202 static int init_nic(struct s2io_nic *nic)
1203 {
1204  struct XENA_dev_config __iomem *bar0 = nic->bar0;
1205  struct net_device *dev = nic->dev;
1206  register u64 val64 = 0;
1207  void __iomem *add;
1208  u32 time;
1209  int i, j;
1210  int dtx_cnt = 0;
1211  unsigned long long mem_share;
1212  int mem_size;
1213  struct config_param *config = &nic->config;
1214  struct mac_info *mac_control = &nic->mac_control;
1215 
1216  /* to set the swapper control on the card */
1217  if (s2io_set_swapper(nic)) {
1218  DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1219  return -EIO;
1220  }
1221 
1222  /*
1223  * Herc requires EOI to be removed from reset before XGXS, so..
1224  */
1225  if (nic->device_type & XFRAME_II_DEVICE) {
1226  val64 = 0xA500000000ULL;
1227  writeq(val64, &bar0->sw_reset);
1228  msleep(500);
1229  val64 = readq(&bar0->sw_reset);
1230  }
1231 
1232  /* Remove XGXS from reset state */
1233  val64 = 0;
1234  writeq(val64, &bar0->sw_reset);
1235  msleep(500);
1236  val64 = readq(&bar0->sw_reset);
1237 
1238  /* Ensure that it's safe to access registers by checking
1239  * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1240  */
1241  if (nic->device_type == XFRAME_II_DEVICE) {
1242  for (i = 0; i < 50; i++) {
1243  val64 = readq(&bar0->adapter_status);
1244  if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1245  break;
1246  msleep(10);
1247  }
1248  if (i == 50)
1249  return -ENODEV;
1250  }
1251 
1252  /* Enable Receiving broadcasts */
1253  add = &bar0->mac_cfg;
1254  val64 = readq(&bar0->mac_cfg);
1255  val64 |= MAC_RMAC_BCAST_ENABLE;
1256  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1257  writel((u32)val64, add);
1258  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1259  writel((u32) (val64 >> 32), (add + 4));
1260 
1261  /* Read registers in all blocks */
1262  val64 = readq(&bar0->mac_int_mask);
1263  val64 = readq(&bar0->mc_int_mask);
1264  val64 = readq(&bar0->xgxs_int_mask);
1265 
1266  /* Set MTU */
1267  val64 = dev->mtu;
1268  writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1269 
1270  if (nic->device_type & XFRAME_II_DEVICE) {
1271  while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1272  SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1273  &bar0->dtx_control, UF);
1274  if (dtx_cnt & 0x1)
1275  msleep(1); /* Necessary!! */
1276  dtx_cnt++;
1277  }
1278  } else {
1279  while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1280  SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1281  &bar0->dtx_control, UF);
1282  val64 = readq(&bar0->dtx_control);
1283  dtx_cnt++;
1284  }
1285  }
1286 
1287  /* Tx DMA Initialization */
1288  val64 = 0;
1289  writeq(val64, &bar0->tx_fifo_partition_0);
1290  writeq(val64, &bar0->tx_fifo_partition_1);
1291  writeq(val64, &bar0->tx_fifo_partition_2);
1292  writeq(val64, &bar0->tx_fifo_partition_3);
1293 
1294  for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1295  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1296 
1297  val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1298  vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1299 
1300  if (i == (config->tx_fifo_num - 1)) {
1301  if (i % 2 == 0)
1302  i++;
1303  }
1304 
1305  switch (i) {
1306  case 1:
1307  writeq(val64, &bar0->tx_fifo_partition_0);
1308  val64 = 0;
1309  j = 0;
1310  break;
1311  case 3:
1312  writeq(val64, &bar0->tx_fifo_partition_1);
1313  val64 = 0;
1314  j = 0;
1315  break;
1316  case 5:
1317  writeq(val64, &bar0->tx_fifo_partition_2);
1318  val64 = 0;
1319  j = 0;
1320  break;
1321  case 7:
1322  writeq(val64, &bar0->tx_fifo_partition_3);
1323  val64 = 0;
1324  j = 0;
1325  break;
1326  default:
1327  j++;
1328  break;
1329  }
1330  }
1331 
1332  /*
1333  * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1334  * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1335  */
1336  if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1337  writeq(PCC_ENABLE(0x0F), &bar0->pcc_enable);
1338 
1339  val64 = readq(&bar0->tx_fifo_partition_0);
1340  DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1341  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1342 
1343  /*
1344  * Initialization of Tx_PA_CONFIG register to ignore packet
1345  * integrity checking.
1346  */
1347  val64 = readq(&bar0->tx_pa_cfg);
1348  val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1349  TX_PA_CFG_IGNORE_SNAP_OUI |
1350  TX_PA_CFG_IGNORE_LLC_CTRL |
1351  TX_PA_CFG_IGNORE_L2_ERR;
1352  writeq(val64, &bar0->tx_pa_cfg);
1353 
1354  /* Rx DMA initialization. */
1355  val64 = 0;
1356  for (i = 0; i < config->rx_ring_num; i++) {
1357  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1358 
1359  val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1360  }
1361  writeq(val64, &bar0->rx_queue_priority);
1362 
1363  /*
1364  * Allocating equal share of memory to all the
1365  * configured Rings.
1366  */
1367  val64 = 0;
1368  if (nic->device_type & XFRAME_II_DEVICE)
1369  mem_size = 32;
1370  else
1371  mem_size = 64;
1372 
1373  for (i = 0; i < config->rx_ring_num; i++) {
1374  switch (i) {
1375  case 0:
1376  mem_share = (mem_size / config->rx_ring_num +
1377  mem_size % config->rx_ring_num);
1378  val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1379  continue;
1380  case 1:
1381  mem_share = (mem_size / config->rx_ring_num);
1382  val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1383  continue;
1384  case 2:
1385  mem_share = (mem_size / config->rx_ring_num);
1386  val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1387  continue;
1388  case 3:
1389  mem_share = (mem_size / config->rx_ring_num);
1390  val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1391  continue;
1392  case 4:
1393  mem_share = (mem_size / config->rx_ring_num);
1394  val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1395  continue;
1396  case 5:
1397  mem_share = (mem_size / config->rx_ring_num);
1398  val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1399  continue;
1400  case 6:
1401  mem_share = (mem_size / config->rx_ring_num);
1402  val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1403  continue;
1404  case 7:
1405  mem_share = (mem_size / config->rx_ring_num);
1406  val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1407  continue;
1408  }
1409  }
1410  writeq(val64, &bar0->rx_queue_cfg);
1411 
1412  /*
1413  * Filling Tx round robin registers
1414  * as per the number of FIFOs for equal scheduling priority
1415  */
1416  switch (config->tx_fifo_num) {
1417  case 1:
1418  val64 = 0x0;
1419  writeq(val64, &bar0->tx_w_round_robin_0);
1420  writeq(val64, &bar0->tx_w_round_robin_1);
1421  writeq(val64, &bar0->tx_w_round_robin_2);
1422  writeq(val64, &bar0->tx_w_round_robin_3);
1423  writeq(val64, &bar0->tx_w_round_robin_4);
1424  break;
1425  case 2:
1426  val64 = 0x0001000100010001ULL;
1427  writeq(val64, &bar0->tx_w_round_robin_0);
1428  writeq(val64, &bar0->tx_w_round_robin_1);
1429  writeq(val64, &bar0->tx_w_round_robin_2);
1430  writeq(val64, &bar0->tx_w_round_robin_3);
1431  val64 = 0x0001000100000000ULL;
1432  writeq(val64, &bar0->tx_w_round_robin_4);
1433  break;
1434  case 3:
1435  val64 = 0x0001020001020001ULL;
1436  writeq(val64, &bar0->tx_w_round_robin_0);
1437  val64 = 0x0200010200010200ULL;
1438  writeq(val64, &bar0->tx_w_round_robin_1);
1439  val64 = 0x0102000102000102ULL;
1440  writeq(val64, &bar0->tx_w_round_robin_2);
1441  val64 = 0x0001020001020001ULL;
1442  writeq(val64, &bar0->tx_w_round_robin_3);
1443  val64 = 0x0200010200000000ULL;
1444  writeq(val64, &bar0->tx_w_round_robin_4);
1445  break;
1446  case 4:
1447  val64 = 0x0001020300010203ULL;
1448  writeq(val64, &bar0->tx_w_round_robin_0);
1449  writeq(val64, &bar0->tx_w_round_robin_1);
1450  writeq(val64, &bar0->tx_w_round_robin_2);
1451  writeq(val64, &bar0->tx_w_round_robin_3);
1452  val64 = 0x0001020300000000ULL;
1453  writeq(val64, &bar0->tx_w_round_robin_4);
1454  break;
1455  case 5:
1456  val64 = 0x0001020304000102ULL;
1457  writeq(val64, &bar0->tx_w_round_robin_0);
1458  val64 = 0x0304000102030400ULL;
1459  writeq(val64, &bar0->tx_w_round_robin_1);
1460  val64 = 0x0102030400010203ULL;
1461  writeq(val64, &bar0->tx_w_round_robin_2);
1462  val64 = 0x0400010203040001ULL;
1463  writeq(val64, &bar0->tx_w_round_robin_3);
1464  val64 = 0x0203040000000000ULL;
1465  writeq(val64, &bar0->tx_w_round_robin_4);
1466  break;
1467  case 6:
1468  val64 = 0x0001020304050001ULL;
1469  writeq(val64, &bar0->tx_w_round_robin_0);
1470  val64 = 0x0203040500010203ULL;
1471  writeq(val64, &bar0->tx_w_round_robin_1);
1472  val64 = 0x0405000102030405ULL;
1473  writeq(val64, &bar0->tx_w_round_robin_2);
1474  val64 = 0x0001020304050001ULL;
1475  writeq(val64, &bar0->tx_w_round_robin_3);
1476  val64 = 0x0203040500000000ULL;
1477  writeq(val64, &bar0->tx_w_round_robin_4);
1478  break;
1479  case 7:
1480  val64 = 0x0001020304050600ULL;
1481  writeq(val64, &bar0->tx_w_round_robin_0);
1482  val64 = 0x0102030405060001ULL;
1483  writeq(val64, &bar0->tx_w_round_robin_1);
1484  val64 = 0x0203040506000102ULL;
1485  writeq(val64, &bar0->tx_w_round_robin_2);
1486  val64 = 0x0304050600010203ULL;
1487  writeq(val64, &bar0->tx_w_round_robin_3);
1488  val64 = 0x0405060000000000ULL;
1489  writeq(val64, &bar0->tx_w_round_robin_4);
1490  break;
1491  case 8:
1492  val64 = 0x0001020304050607ULL;
1493  writeq(val64, &bar0->tx_w_round_robin_0);
1494  writeq(val64, &bar0->tx_w_round_robin_1);
1495  writeq(val64, &bar0->tx_w_round_robin_2);
1496  writeq(val64, &bar0->tx_w_round_robin_3);
1497  val64 = 0x0001020300000000ULL;
1498  writeq(val64, &bar0->tx_w_round_robin_4);
1499  break;
1500  }
1501 
1502  /* Enable all configured Tx FIFO partitions */
1503  val64 = readq(&bar0->tx_fifo_partition_0);
1504  val64 |= (TX_FIFO_PARTITION_EN);
1505  writeq(val64, &bar0->tx_fifo_partition_0);
1506 
1507  /* Filling the Rx round robin registers as per the
1508  * number of Rings and steering based on QoS with
1509  * equal priority.
1510  */
1511  switch (config->rx_ring_num) {
1512  case 1:
1513  val64 = 0x0;
1514  writeq(val64, &bar0->rx_w_round_robin_0);
1515  writeq(val64, &bar0->rx_w_round_robin_1);
1516  writeq(val64, &bar0->rx_w_round_robin_2);
1517  writeq(val64, &bar0->rx_w_round_robin_3);
1518  writeq(val64, &bar0->rx_w_round_robin_4);
1519 
1520  val64 = 0x8080808080808080ULL;
1521  writeq(val64, &bar0->rts_qos_steering);
1522  break;
1523  case 2:
1524  val64 = 0x0001000100010001ULL;
1525  writeq(val64, &bar0->rx_w_round_robin_0);
1526  writeq(val64, &bar0->rx_w_round_robin_1);
1527  writeq(val64, &bar0->rx_w_round_robin_2);
1528  writeq(val64, &bar0->rx_w_round_robin_3);
1529  val64 = 0x0001000100000000ULL;
1530  writeq(val64, &bar0->rx_w_round_robin_4);
1531 
1532  val64 = 0x8080808040404040ULL;
1533  writeq(val64, &bar0->rts_qos_steering);
1534  break;
1535  case 3:
1536  val64 = 0x0001020001020001ULL;
1537  writeq(val64, &bar0->rx_w_round_robin_0);
1538  val64 = 0x0200010200010200ULL;
1539  writeq(val64, &bar0->rx_w_round_robin_1);
1540  val64 = 0x0102000102000102ULL;
1541  writeq(val64, &bar0->rx_w_round_robin_2);
1542  val64 = 0x0001020001020001ULL;
1543  writeq(val64, &bar0->rx_w_round_robin_3);
1544  val64 = 0x0200010200000000ULL;
1545  writeq(val64, &bar0->rx_w_round_robin_4);
1546 
1547  val64 = 0x8080804040402020ULL;
1548  writeq(val64, &bar0->rts_qos_steering);
1549  break;
1550  case 4:
1551  val64 = 0x0001020300010203ULL;
1552  writeq(val64, &bar0->rx_w_round_robin_0);
1553  writeq(val64, &bar0->rx_w_round_robin_1);
1554  writeq(val64, &bar0->rx_w_round_robin_2);
1555  writeq(val64, &bar0->rx_w_round_robin_3);
1556  val64 = 0x0001020300000000ULL;
1557  writeq(val64, &bar0->rx_w_round_robin_4);
1558 
1559  val64 = 0x8080404020201010ULL;
1560  writeq(val64, &bar0->rts_qos_steering);
1561  break;
1562  case 5:
1563  val64 = 0x0001020304000102ULL;
1564  writeq(val64, &bar0->rx_w_round_robin_0);
1565  val64 = 0x0304000102030400ULL;
1566  writeq(val64, &bar0->rx_w_round_robin_1);
1567  val64 = 0x0102030400010203ULL;
1568  writeq(val64, &bar0->rx_w_round_robin_2);
1569  val64 = 0x0400010203040001ULL;
1570  writeq(val64, &bar0->rx_w_round_robin_3);
1571  val64 = 0x0203040000000000ULL;
1572  writeq(val64, &bar0->rx_w_round_robin_4);
1573 
1574  val64 = 0x8080404020201008ULL;
1575  writeq(val64, &bar0->rts_qos_steering);
1576  break;
1577  case 6:
1578  val64 = 0x0001020304050001ULL;
1579  writeq(val64, &bar0->rx_w_round_robin_0);
1580  val64 = 0x0203040500010203ULL;
1581  writeq(val64, &bar0->rx_w_round_robin_1);
1582  val64 = 0x0405000102030405ULL;
1583  writeq(val64, &bar0->rx_w_round_robin_2);
1584  val64 = 0x0001020304050001ULL;
1585  writeq(val64, &bar0->rx_w_round_robin_3);
1586  val64 = 0x0203040500000000ULL;
1587  writeq(val64, &bar0->rx_w_round_robin_4);
1588 
1589  val64 = 0x8080404020100804ULL;
1590  writeq(val64, &bar0->rts_qos_steering);
1591  break;
1592  case 7:
1593  val64 = 0x0001020304050600ULL;
1594  writeq(val64, &bar0->rx_w_round_robin_0);
1595  val64 = 0x0102030405060001ULL;
1596  writeq(val64, &bar0->rx_w_round_robin_1);
1597  val64 = 0x0203040506000102ULL;
1598  writeq(val64, &bar0->rx_w_round_robin_2);
1599  val64 = 0x0304050600010203ULL;
1600  writeq(val64, &bar0->rx_w_round_robin_3);
1601  val64 = 0x0405060000000000ULL;
1602  writeq(val64, &bar0->rx_w_round_robin_4);
1603 
1604  val64 = 0x8080402010080402ULL;
1605  writeq(val64, &bar0->rts_qos_steering);
1606  break;
1607  case 8:
1608  val64 = 0x0001020304050607ULL;
1609  writeq(val64, &bar0->rx_w_round_robin_0);
1610  writeq(val64, &bar0->rx_w_round_robin_1);
1611  writeq(val64, &bar0->rx_w_round_robin_2);
1612  writeq(val64, &bar0->rx_w_round_robin_3);
1613  val64 = 0x0001020300000000ULL;
1614  writeq(val64, &bar0->rx_w_round_robin_4);
1615 
1616  val64 = 0x8040201008040201ULL;
1617  writeq(val64, &bar0->rts_qos_steering);
1618  break;
1619  }
1620 
1621  /* UDP Fix */
1622  val64 = 0;
1623  for (i = 0; i < 8; i++)
1624  writeq(val64, &bar0->rts_frm_len_n[i]);
1625 
1626  /* Set the default rts frame length for the rings configured */
1627  val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1628  for (i = 0 ; i < config->rx_ring_num ; i++)
1629  writeq(val64, &bar0->rts_frm_len_n[i]);
1630 
1631  /* Set the frame length for the configured rings
1632  * desired by the user
1633  */
1634  for (i = 0; i < config->rx_ring_num; i++) {
1635  /* If rts_frm_len[i] == 0 then it is assumed that the user has
1636  * not specified frame length steering.
1637  * If the user provides the frame length then program
1638  * the rts_frm_len register for those values or else
1639  * leave it as it is.
1640  */
1641  if (rts_frm_len[i] != 0) {
1642  writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1643  &bar0->rts_frm_len_n[i]);
1644  }
1645  }
1646 
1647  /* Disable differentiated services steering logic */
1648  for (i = 0; i < 64; i++) {
1649  if (rts_ds_steer(nic, i, 0) == FAILURE) {
1651  "%s: rts_ds_steer failed on codepoint %d\n",
1652  dev->name, i);
1653  return -ENODEV;
1654  }
1655  }
1656 
1657  /* Program statistics memory */
1658  writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1659 
1660  if (nic->device_type == XFRAME_II_DEVICE) {
1661  val64 = STAT_BC(0x320);
1662  writeq(val64, &bar0->stat_byte_cnt);
1663  }
1664 
1665  /*
1666  * Initializing the sampling rate for the device to calculate the
1667  * bandwidth utilization.
1668  */
1669  val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1670  MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1671  writeq(val64, &bar0->mac_link_util);
1672 
1673  /*
1674  * Initializing the Transmit and Receive Traffic Interrupt
1675  * Scheme.
1676  */
1677 
1678  /* Initialize TTI */
1679  if (SUCCESS != init_tti(nic, nic->last_link_state))
1680  return -ENODEV;
1681 
1682  /* RTI Initialization */
1683  if (nic->device_type == XFRAME_II_DEVICE) {
1684  /*
1685  * Programmed to generate approx. 500 interrupts per
1686  * second
1687  */
1688  int count = (nic->config.bus_speed * 125)/4;
1689  val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1690  } else
1691  val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1692  val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1693  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1694  RTI_DATA1_MEM_RX_URNG_C(0x30) |
1695  RTI_DATA1_MEM_RX_TIMER_AC_EN;
1696 
1697  writeq(val64, &bar0->rti_data1_mem);
1698 
1699  val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1700  RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1701  if (nic->config.intr_type == MSI_X)
1702  val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1703  RTI_DATA2_MEM_RX_UFC_D(0x40));
1704  else
1705  val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1706  RTI_DATA2_MEM_RX_UFC_D(0x80));
1707  writeq(val64, &bar0->rti_data2_mem);
1708 
1709  for (i = 0; i < config->rx_ring_num; i++) {
1710  val64 = RTI_CMD_MEM_WE |
1711  RTI_CMD_MEM_STROBE_NEW_CMD |
1712  RTI_CMD_MEM_OFFSET(i);
1713  writeq(val64, &bar0->rti_command_mem);
1714 
1715  /*
1716  * Once the operation completes, the Strobe bit of the
1717  * command register will be reset. We poll for this
1718  * particular condition. We wait for a maximum of 500ms
1719  * for the operation to complete, if it's not complete
1720  * by then we return error.
1721  */
1722  time = 0;
1723  while (true) {
1724  val64 = readq(&bar0->rti_command_mem);
1725  if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1726  break;
1727 
1728  if (time > 10) {
1729  DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1730  dev->name);
1731  return -ENODEV;
1732  }
1733  time++;
1734  msleep(50);
1735  }
1736  }
1737 
1738  /*
1739  * Initializing proper values as Pause threshold into all
1740  * the 8 Queues on Rx side.
1741  */
1742  writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1743  writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1744 
1745  /* Disable RMAC PAD STRIPPING */
1746  add = &bar0->mac_cfg;
1747  val64 = readq(&bar0->mac_cfg);
1748  val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1749  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1750  writel((u32) (val64), add);
1751  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1752  writel((u32) (val64 >> 32), (add + 4));
1753  val64 = readq(&bar0->mac_cfg);
1754 
1755  /* Enable FCS stripping by adapter */
1756  add = &bar0->mac_cfg;
1757  val64 = readq(&bar0->mac_cfg);
1758  val64 |= MAC_CFG_RMAC_STRIP_FCS;
1759  if (nic->device_type == XFRAME_II_DEVICE)
1760  writeq(val64, &bar0->mac_cfg);
1761  else {
1762  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1763  writel((u32) (val64), add);
1764  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1765  writel((u32) (val64 >> 32), (add + 4));
1766  }
1767 
1768  /*
1769  * Set the time value to be inserted in the pause frame
1770  * generated by xena.
1771  */
1772  val64 = readq(&bar0->rmac_pause_cfg);
1773  val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1774  val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1775  writeq(val64, &bar0->rmac_pause_cfg);
1776 
1777  /*
1778  * Set the Threshold Limit for Generating the pause frame
1779  * If the amount of data in any Queue exceeds the ratio of
1780  * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1781  * a pause frame is generated
1782  */
1783  val64 = 0;
1784  for (i = 0; i < 4; i++) {
1785  val64 |= (((u64)0xFF00 |
1786  nic->mac_control.mc_pause_threshold_q0q3)
1787  << (i * 2 * 8));
1788  }
1789  writeq(val64, &bar0->mc_pause_thresh_q0q3);
1790 
1791  val64 = 0;
1792  for (i = 0; i < 4; i++) {
1793  val64 |= (((u64)0xFF00 |
1794  nic->mac_control.mc_pause_threshold_q4q7)
1795  << (i * 2 * 8));
1796  }
1797  writeq(val64, &bar0->mc_pause_thresh_q4q7);
1798 
1799  /*
1800  * TxDMA will stop Read requests if the number of read splits has
1801  * exceeded the limit pointed to by shared_splits
1802  */
1803  val64 = readq(&bar0->pic_control);
1804  val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1805  writeq(val64, &bar0->pic_control);
1806 
1807  if (nic->config.bus_speed == 266) {
1808  writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1809  writeq(0x0, &bar0->read_retry_delay);
1810  writeq(0x0, &bar0->write_retry_delay);
1811  }
1812 
1813  /*
1814  * Programming the Herc to split every write transaction
1815  * that does not start on an ADB to reduce disconnects.
1816  */
1817  if (nic->device_type == XFRAME_II_DEVICE) {
1818  val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1819  MISC_LINK_STABILITY_PRD(3);
1820  writeq(val64, &bar0->misc_control);
1821  val64 = readq(&bar0->pic_control2);
1822  val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1823  writeq(val64, &bar0->pic_control2);
1824  }
1825  if (strstr(nic->product_name, "CX4")) {
1826  val64 = TMAC_AVG_IPG(0x17);
1827  writeq(val64, &bar0->tmac_avg_ipg);
1828  }
1829 
1830  return SUCCESS;
1831 }
1832 #define LINK_UP_DOWN_INTERRUPT 1
1833 #define MAC_RMAC_ERR_TIMER 2
1834 
1835 static int s2io_link_fault_indication(struct s2io_nic *nic)
1836 {
1837  if (nic->device_type == XFRAME_II_DEVICE)
1838  return LINK_UP_DOWN_INTERRUPT;
1839  else
1840  return MAC_RMAC_ERR_TIMER;
1841 }
1842 
1843 /**
1844  * do_s2io_write_bits - update alarm bits in alarm register
1845  * @value: alarm bits
1846  * @flag: interrupt status
1847  * @addr: address value
1848  * Description: update alarm bits in alarm register
1849  * Return Value:
1850  * NONE.
1851  */
1852 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1853 {
1854  u64 temp64;
1855 
1856  temp64 = readq(addr);
1857 
1858  if (flag == ENABLE_INTRS)
1859  temp64 &= ~((u64)value);
1860  else
1861  temp64 |= ((u64)value);
1862  writeq(temp64, addr);
1863 }
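 /*
  * Note the inverted sense of these alarm mask registers: a set bit
  * masks (disables) the corresponding alarm, so enabling interrupts
  * means clearing bits and disabling means setting them.
  */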
1864 
1865 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1866 {
1867  struct XENA_dev_config __iomem *bar0 = nic->bar0;
1868  register u64 gen_int_mask = 0;
1869  u64 interruptible;
1870 
1871  writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1872  if (mask & TX_DMA_INTR) {
1873  gen_int_mask |= TXDMA_INT_M;
1874 
1875  do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1876  TXDMA_PCC_INT | TXDMA_TTI_INT |
1877  TXDMA_LSO_INT | TXDMA_TPA_INT |
1878  TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1879 
1880  do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1881  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1882  PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1883  &bar0->pfc_err_mask);
1884 
1885  do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1886  TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1887  TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1888 
1889  do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1890  PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1891  PCC_N_SERR | PCC_6_COF_OV_ERR |
1892  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1893  PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1894  PCC_TXB_ECC_SG_ERR,
1895  flag, &bar0->pcc_err_mask);
1896 
1897  do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1898  TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1899 
1900  do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1901  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1902  LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1903  flag, &bar0->lso_err_mask);
1904 
1905  do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1906  flag, &bar0->tpa_err_mask);
1907 
1908  do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1909  }
1910 
1911  if (mask & TX_MAC_INTR) {
1912  gen_int_mask |= TXMAC_INT_M;
1913  do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1914  &bar0->mac_int_mask);
1915  do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1916  TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1917  TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1918  flag, &bar0->mac_tmac_err_mask);
1919  }
1920 
1921  if (mask & TX_XGXS_INTR) {
1922  gen_int_mask |= TXXGXS_INT_M;
1923  do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1924  &bar0->xgxs_int_mask);
1925  do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1926  TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1927  flag, &bar0->xgxs_txgxs_err_mask);
1928  }
1929 
1930  if (mask & RX_DMA_INTR) {
1931  gen_int_mask |= RXDMA_INT_M;
1932  do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1933  RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1934  flag, &bar0->rxdma_int_mask);
1935  do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1936  RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1937  RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1938  RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1939  do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1940  PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1941  PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1942  &bar0->prc_pcix_err_mask);
1943  do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1944  RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1945  &bar0->rpa_err_mask);
1946  do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1947  RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1948  RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1949  RDA_FRM_ECC_SG_ERR |
1950  RDA_MISC_ERR | RDA_PCIX_ERR,
1951  flag, &bar0->rda_err_mask);
1952  do_s2io_write_bits(RTI_SM_ERR_ALARM |
1953  RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1954  flag, &bar0->rti_err_mask);
1955  }
1956 
1957  if (mask & RX_MAC_INTR) {
1958  gen_int_mask |= RXMAC_INT_M;
1959  do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1960  &bar0->mac_int_mask);
1961  interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1962  RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1963  RMAC_DOUBLE_ECC_ERR);
1964  if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1965  interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1966  do_s2io_write_bits(interruptible,
1967  flag, &bar0->mac_rmac_err_mask);
1968  }
1969 
1970  if (mask & RX_XGXS_INTR) {
1971  gen_int_mask |= RXXGXS_INT_M;
1972  do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1973  &bar0->xgxs_int_mask);
1974  do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1975  &bar0->xgxs_rxgxs_err_mask);
1976  }
1977 
1978  if (mask & MC_INTR) {
1979  gen_int_mask |= MC_INT_M;
1980  do_s2io_write_bits(MC_INT_MASK_MC_INT,
1981  flag, &bar0->mc_int_mask);
1982  do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1983  MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1984  &bar0->mc_err_mask);
1985  }
1986  nic->general_int_mask = gen_int_mask;
1987 
1988  /* Remove this line when alarm interrupts are enabled */
1989  nic->general_int_mask = 0;
1990 }
1991 
1992 /**
1993  * en_dis_able_nic_intrs - Enable or Disable the interrupts
1994  * @nic: device private variable
1995  * @mask: A mask indicating which Intr block must be modified
1996  * @flag: A flag indicating whether to enable or disable the Intrs
1997  * Description: This function will either disable or enable the
1998  * interrupts depending on the flag argument. The mask argument can be
1999  * used to enable/disable any Intr block.
2000  * Return Value:
2001  * NONE.
2002  */
2003 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2004 {
2005  struct XENA_dev_config __iomem *bar0 = nic->bar0;
2006  register u64 temp64 = 0, intr_mask = 0;
2007 
2008  intr_mask = nic->general_int_mask;
2009 
2010  /* Top level interrupt classification */
2011  /* PIC Interrupts */
2012  if (mask & TX_PIC_INTR) {
2013  /* Enable PIC Intrs in the general intr mask register */
2014  intr_mask |= TXPIC_INT_M;
2015  if (flag == ENABLE_INTRS) {
2016  /*
2017  * If Hercules adapter enable GPIO otherwise
2018  * disable all PCIX, Flash, MDIO, IIC and GPIO
2019  * interrupts for now.
2020  * TODO
2021  */
2022  if (s2io_link_fault_indication(nic) ==
2023  LINK_UP_DOWN_INTERRUPT) {
2024  do_s2io_write_bits(PIC_INT_GPIO, flag,
2025  &bar0->pic_int_mask);
2026  do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2027  &bar0->gpio_int_mask);
2028  } else
2029  writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2030  } else if (flag == DISABLE_INTRS) {
2031  /*
2032  * Disable PIC Intrs in the general
2033  * intr mask register
2034  */
2035  writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2036  }
2037  }
2038 
2039  /* Tx traffic interrupts */
2040  if (mask & TX_TRAFFIC_INTR) {
2041  intr_mask |= TXTRAFFIC_INT_M;
2042  if (flag == ENABLE_INTRS) {
2043  /*
2044  * Enable all the Tx side interrupts
2045  * writing 0 Enables all 64 TX interrupt levels
2046  */
2047  writeq(0x0, &bar0->tx_traffic_mask);
2048  } else if (flag == DISABLE_INTRS) {
2049  /*
2050  * Disable Tx Traffic Intrs in the general intr mask
2051  * register.
2052  */
2053  writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2054  }
2055  }
2056 
2057  /* Rx traffic interrupts */
2058  if (mask & RX_TRAFFIC_INTR) {
2059  intr_mask |= RXTRAFFIC_INT_M;
2060  if (flag == ENABLE_INTRS) {
2061  /* writing 0 Enables all 8 RX interrupt levels */
2062  writeq(0x0, &bar0->rx_traffic_mask);
2063  } else if (flag == DISABLE_INTRS) {
2064  /*
2065  * Disable Rx Traffic Intrs in the general intr mask
2066  * register.
2067  */
2068  writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2069  }
2070  }
2071 
2072  temp64 = readq(&bar0->general_int_mask);
2073  if (flag == ENABLE_INTRS)
2074  temp64 &= ~((u64)intr_mask);
2075  else
2076  temp64 = DISABLE_ALL_INTRS;
2077  writeq(temp64, &bar0->general_int_mask);
2078 
2079  nic->general_int_mask = readq(&bar0->general_int_mask);
2080 }
2081 
2082 /**
2083  * verify_pcc_quiescent - Checks for PCC quiescent state
2084  * Return: 1 if PCC is quiescent
2085  *         0 if PCC is not quiescent
2086  */
2087 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2088 {
2089  int ret = 0, herc;
2090  struct XENA_dev_config __iomem *bar0 = sp->bar0;
2091  u64 val64 = readq(&bar0->adapter_status);
2092 
2093  herc = (sp->device_type == XFRAME_II_DEVICE);
2094 
2095  if (flag == false) {
2096  if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2097  if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2098  ret = 1;
2099  } else {
2100  if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2101  ret = 1;
2102  }
2103  } else {
2104  if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2105  if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2106  ADAPTER_STATUS_RMAC_PCC_IDLE))
2107  ret = 1;
2108  } else {
2109  if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2110  ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2111  ret = 1;
2112  }
2113  }
2114 
2115  return ret;
2116 }
2117 /**
2118  * verify_xena_quiescence - Checks whether the H/W is ready
2119  * Description: Returns whether the H/W is ready to go or not. Depending
2120  * on whether adapter enable bit was written or not the comparison
2121  * differs and the calling function passes the input argument flag to
2122  * indicate this.
2123  * Return: 1 if Xena is quiescent
2124  *         0 if Xena is not quiescent
2125  */
2126 
2127 static int verify_xena_quiescence(struct s2io_nic *sp)
2128 {
2129  int mode;
2130  struct XENA_dev_config __iomem *bar0 = sp->bar0;
2131  u64 val64 = readq(&bar0->adapter_status);
2132  mode = s2io_verify_pci_mode(sp);
2133 
2134  if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2135  DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2136  return 0;
2137  }
2138  if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2139  DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2140  return 0;
2141  }
2142  if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2143  DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2144  return 0;
2145  }
2146  if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2147  DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2148  return 0;
2149  }
2150  if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2151  DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2152  return 0;
2153  }
2154  if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2155  DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2156  return 0;
2157  }
2158  if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2159  DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2160  return 0;
2161  }
2162  if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2163  DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2164  return 0;
2165  }
2166 
2167  /*
2168  * In PCI 33 mode, the P_PLL is not used, and therefore,
2169  * the P_PLL_LOCK bit in the adapter_status register will
2170  * not be asserted.
2171  */
2172  if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2173  sp->device_type == XFRAME_II_DEVICE &&
2174  mode != PCI_MODE_PCI_33) {
2175  DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2176  return 0;
2177  }
2178  if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2179  ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2180  DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2181  return 0;
2182  }
2183  return 1;
2184 }
2185 
2186 /**
2187  * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2188  * @sp: Pointer to device specific structure
2189  * Description:
2190  * Procedure to clear mac address reading problems on Alpha platforms.
2191  * Return Value:
2192  * NONE.
2193  */
2194 static void fix_mac_address(struct s2io_nic *sp)
2195 {
2196  struct XENA_dev_config __iomem *bar0 = sp->bar0;
2197  int i = 0;
2198 
2199  while (fix_mac[i] != END_SIGN) {
2200  writeq(fix_mac[i++], &bar0->gpio_control);
2201  udelay(10);
2202  (void) readq(&bar0->gpio_control);
2203  }
2204 }
2205 
2206 /**
2207  * start_nic - Turns the device on
2208  * @nic : device private variable.
2209  * Description:
2210  * This function actually turns the device on. Before this function is
2211  * called, all Registers are configured from their reset states
2212  * and shared memory is allocated but the NIC is still quiescent. On
2213  * calling this function, the device interrupts are cleared and the NIC
2214  * is literally switched on by writing into the adapter control register.
2215  * Return Value:
2216  * SUCCESS on success and -1 on failure.
2217  */
2218 
2219 static int start_nic(struct s2io_nic *nic)
2220 {
2221  struct XENA_dev_config __iomem *bar0 = nic->bar0;
2222  struct net_device *dev = nic->dev;
2223  register u64 val64 = 0;
2224  u16 subid, i;
2225  struct config_param *config = &nic->config;
2226  struct mac_info *mac_control = &nic->mac_control;
2227 
2228  /* PRC Initialization and configuration */
2229  for (i = 0; i < config->rx_ring_num; i++) {
2230  struct ring_info *ring = &mac_control->rings[i];
2231 
2232  writeq((u64)ring->rx_blocks[0].block_dma_addr,
2233  &bar0->prc_rxd0_n[i]);
2234 
2235  val64 = readq(&bar0->prc_ctrl_n[i]);
2236  if (nic->rxd_mode == RXD_MODE_1)
2237  val64 |= PRC_CTRL_RC_ENABLED;
2238  else
2239  val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2240  if (nic->device_type == XFRAME_II_DEVICE)
2241  val64 |= PRC_CTRL_GROUP_READS;
2242  val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2243  val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2244  writeq(val64, &bar0->prc_ctrl_n[i]);
2245  }
2246 
2247  if (nic->rxd_mode == RXD_MODE_3B) {
2248  /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2249  val64 = readq(&bar0->rx_pa_cfg);
2250  val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2251  writeq(val64, &bar0->rx_pa_cfg);
2252  }
2253 
2254  if (vlan_tag_strip == 0) {
2255  val64 = readq(&bar0->rx_pa_cfg);
2256  val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2257  writeq(val64, &bar0->rx_pa_cfg);
2258  nic->vlan_strip_flag = 0;
2259  }
2260 
2261  /*
2262  * Enabling MC-RLDRAM. After enabling the device, we wait
2263  * around 100ms, which is approximately the time required
2264  * for the device to be ready for operation.
2265  */
2266  val64 = readq(&bar0->mc_rldram_mrs);
2267  val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2268  SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2269  val64 = readq(&bar0->mc_rldram_mrs);
2270 
2271  msleep(100); /* Delay by around 100 ms. */
2272 
2273  /* Enabling ECC Protection. */
2274  val64 = readq(&bar0->adapter_control);
2275  val64 &= ~ADAPTER_ECC_EN;
2276  writeq(val64, &bar0->adapter_control);
2277 
2278  /*
2279  * Verify if the device is ready to be enabled, if so enable
2280  * it.
2281  */
2282  val64 = readq(&bar0->adapter_status);
2283  if (!verify_xena_quiescence(nic)) {
2284  DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2285  "Adapter status reads: 0x%llx\n",
2286  dev->name, (unsigned long long)val64);
2287  return FAILURE;
2288  }
2289 
2290  /*
2291  * With some switches, link might be already up at this point.
2292  * Because of this weird behavior, when we enable laser,
2293  * we may not get link. We need to handle this. We cannot
2294  * figure out which switch is misbehaving. So we are forced to
2295  * make a global change.
2296  */
2297 
2298  /* Enabling Laser. */
2299  val64 = readq(&bar0->adapter_control);
2300  val64 |= ADAPTER_EOI_TX_ON;
2301  writeq(val64, &bar0->adapter_control);
2302 
2303  if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2304  /*
2305  * Don't see link state interrupts initially on some switches,
2306  * so directly scheduling the link state task here.
2307  */
2308  schedule_work(&nic->set_link_task);
2309  }
2310  /* SXE-002: Initialize link and activity LED */
2311  subid = nic->pdev->subsystem_device;
2312  if (((subid & 0xFF) >= 0x07) &&
2313  (nic->device_type == XFRAME_I_DEVICE)) {
2314  val64 = readq(&bar0->gpio_control);
2315  val64 |= 0x0000800000000000ULL;
2316  writeq(val64, &bar0->gpio_control);
2317  val64 = 0x0411040400000000ULL;
2318  writeq(val64, (void __iomem *)bar0 + 0x2700);
2319  }
2320 
2321  return SUCCESS;
2322 }
2323 /**
2324  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2325  */
2326 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2327  struct TxD *txdlp, int get_off)
2328 {
2329  struct s2io_nic *nic = fifo_data->nic;
2330  struct sk_buff *skb;
2331  struct TxD *txds;
2332  u16 j, frg_cnt;
2333 
2334  txds = txdlp;
2335  if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2336  pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2337  sizeof(u64), PCI_DMA_TODEVICE);
2338  txds++;
2339  }
2340 
2341  skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2342  if (!skb) {
2343  memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2344  return NULL;
2345  }
2346  pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2347  skb_headlen(skb), PCI_DMA_TODEVICE);
2348  frg_cnt = skb_shinfo(skb)->nr_frags;
2349  if (frg_cnt) {
2350  txds++;
2351  for (j = 0; j < frg_cnt; j++, txds++) {
2352  const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2353  if (!txds->Buffer_Pointer)
2354  break;
2355  pci_unmap_page(nic->pdev,
2356  (dma_addr_t)txds->Buffer_Pointer,
2357  skb_frag_size(frag), PCI_DMA_TODEVICE);
2358  }
2359  }
2360  memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2361  return skb;
2362 }
2363 
2364 /**
2365  * free_tx_buffers - Free all queued Tx buffers
2366  * @nic : device private variable.
2367  * Description:
2368  * Free all queued Tx buffers.
2369  * Return Value: void.
2370  */
2371 
2372 static void free_tx_buffers(struct s2io_nic *nic)
2373 {
2374  struct net_device *dev = nic->dev;
2375  struct sk_buff *skb;
2376  struct TxD *txdp;
2377  int i, j;
2378  int cnt = 0;
2379  struct config_param *config = &nic->config;
2380  struct mac_info *mac_control = &nic->mac_control;
2381  struct stat_block *stats = mac_control->stats_info;
2382  struct swStat *swstats = &stats->sw_stat;
2383 
2384  for (i = 0; i < config->tx_fifo_num; i++) {
2385  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2386  struct fifo_info *fifo = &mac_control->fifos[i];
2387  unsigned long flags;
2388 
2389  spin_lock_irqsave(&fifo->tx_lock, flags);
2390  for (j = 0; j < tx_cfg->fifo_len; j++) {
2391  txdp = fifo->list_info[j].list_virt_addr;
2392  skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2393  if (skb) {
2394  swstats->mem_freed += skb->truesize;
2395  dev_kfree_skb(skb);
2396  cnt++;
2397  }
2398  }
2399  DBG_PRINT(INTR_DBG,
2400  "%s: forcibly freeing %d skbs on FIFO%d\n",
2401  dev->name, cnt, i);
2402  fifo->tx_curr_get_info.offset = 0;
2403  fifo->tx_curr_put_info.offset = 0;
2404  spin_unlock_irqrestore(&fifo->tx_lock, flags);
2405  }
2406 }
2407 
2408 /**
2409  * stop_nic - To stop the nic
2410  * @nic : device private variable.
2411  * Description:
2412  * This function does exactly the opposite of what the start_nic()
2413  * function does. This function is called to stop the device.
2414  * Return Value:
2415  * void.
2416  */
2417 
2418 static void stop_nic(struct s2io_nic *nic)
2419 {
2420  struct XENA_dev_config __iomem *bar0 = nic->bar0;
2421  register u64 val64 = 0;
2422  u16 interruptible;
2423 
2424  /* Disable all interrupts */
2425  en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2426  interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2427  interruptible |= TX_PIC_INTR;
2428  en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2429 
2430  /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2431  val64 = readq(&bar0->adapter_control);
2432  val64 &= ~(ADAPTER_CNTL_EN);
2433  writeq(val64, &bar0->adapter_control);
2434 }
2435 
2436 /**
2437  * fill_rx_buffers - Allocates the Rx side skbs
2438  * @nic : device private variable.
2439  * @ring : per ring structure.
2440  * @from_card_up : If this is true, we will map the buffer to get
2441  *    the dma address for buf0 and buf1 to give it to the card.
2442  *    Else we will sync the already mapped buffer to give it to the card.
2443  * Description:
2444  * The function allocates Rx side skbs and puts the physical
2445  * address of these buffers into the RxD buffer pointers, so that the NIC
2446  * can DMA the received frame into these locations.
2447  * The NIC supports 3 receive modes, viz
2448  * 1. single buffer,
2449  * 2. three buffer and
2450  * 3. Five buffer modes.
2451  * Each mode defines how many fragments the received frame will be split
2452  * up into by the NIC. The frame is split into L3 header, L4 Header,
2453  * L4 payload in three buffer mode and in 5 buffer mode, L4 payload
2454  * itself is split into 3 fragments. As of now only single buffer mode
2455  * is supported.
2456  * Return Value: SUCCESS on success or an appropriate -ve value on failure.
2457  */
2458 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2459  int from_card_up)
2460 {
2461  struct sk_buff *skb;
2462  struct RxD_t *rxdp;
2463  int off, size, block_no, block_no1;
2464  u32 alloc_tab = 0;
2465  u32 alloc_cnt;
2466  u64 tmp;
2467  struct buffAdd *ba;
2468  struct RxD_t *first_rxdp = NULL;
2469  u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2470  int rxd_index = 0;
2471  struct RxD1 *rxdp1;
2472  struct RxD3 *rxdp3;
2473  struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2474 
2475  alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2476 
2477  block_no1 = ring->rx_curr_get_info.block_index;
2478  while (alloc_tab < alloc_cnt) {
2479  block_no = ring->rx_curr_put_info.block_index;
2480 
2481  off = ring->rx_curr_put_info.offset;
2482 
2483  rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2484 
2485  rxd_index = off + 1;
2486  if (block_no)
2487  rxd_index += (block_no * ring->rxd_count);
2488 
2489  if ((block_no == block_no1) &&
2490  (off == ring->rx_curr_get_info.offset) &&
2491  (rxdp->Host_Control)) {
2492  DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2493  ring->dev->name);
2494  goto end;
2495  }
2496  if (off && (off == ring->rxd_count)) {
2497  ring->rx_curr_put_info.block_index++;
2498  if (ring->rx_curr_put_info.block_index ==
2499  ring->block_count)
2500  ring->rx_curr_put_info.block_index = 0;
2501  block_no = ring->rx_curr_put_info.block_index;
2502  off = 0;
2503  ring->rx_curr_put_info.offset = off;
2504  rxdp = ring->rx_blocks[block_no].block_virt_addr;
2505  DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2506  ring->dev->name, rxdp);
2507 
2508  }
2509 
2510  if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2511  ((ring->rxd_mode == RXD_MODE_3B) &&
2512  (rxdp->Control_2 & s2BIT(0)))) {
2513  ring->rx_curr_put_info.offset = off;
2514  goto end;
2515  }
2516  /* calculate size of skb based on ring mode */
2517  size = ring->mtu +
2518  HEADER_ETHERNET_II_802_3_SIZE +
2519  HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2520  if (ring->rxd_mode == RXD_MODE_1)
2521  size += NET_IP_ALIGN;
2522  else
2523  size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2524 
2525  /* allocate skb */
2526  skb = netdev_alloc_skb(nic->dev, size);
2527  if (!skb) {
2528  DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2529  ring->dev->name);
2530  if (first_rxdp) {
2531  wmb();
2532  first_rxdp->Control_1 |= RXD_OWN_XENA;
2533  }
2534  swstats->mem_alloc_fail_cnt++;
2535 
2536  return -ENOMEM ;
2537  }
2538  swstats->mem_allocated += skb->truesize;
2539 
2540  if (ring->rxd_mode == RXD_MODE_1) {
2541  /* 1 buffer mode - normal operation mode */
2542  rxdp1 = (struct RxD1 *)rxdp;
2543  memset(rxdp, 0, sizeof(struct RxD1));
2544  skb_reserve(skb, NET_IP_ALIGN);
2545  rxdp1->Buffer0_ptr =
2546  pci_map_single(ring->pdev, skb->data,
2547  size - NET_IP_ALIGN,
2548  PCI_DMA_FROMDEVICE);
2549  if (pci_dma_mapping_error(nic->pdev,
2550  rxdp1->Buffer0_ptr))
2551  goto pci_map_failed;
2552 
2553  rxdp->Control_2 =
2554  SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2555  rxdp->Host_Control = (unsigned long)skb;
2556  } else if (ring->rxd_mode == RXD_MODE_3B) {
2557  /*
2558  * 2 buffer mode -
2559  * 2 buffer mode provides 128
2560  * byte aligned receive buffers.
2561  */
2562 
2563  rxdp3 = (struct RxD3 *)rxdp;
2564  /* save buffer pointers to avoid frequent dma mapping */
2565  Buffer0_ptr = rxdp3->Buffer0_ptr;
2566  Buffer1_ptr = rxdp3->Buffer1_ptr;
2567  memset(rxdp, 0, sizeof(struct RxD3));
2568  /* restore the buffer pointers for dma sync*/
2569  rxdp3->Buffer0_ptr = Buffer0_ptr;
2570  rxdp3->Buffer1_ptr = Buffer1_ptr;
2571 
2572  ba = &ring->ba[block_no][off];
2573  skb_reserve(skb, BUF0_LEN);
2574  tmp = (u64)(unsigned long)skb->data;
2575  tmp += ALIGN_SIZE;
2576  tmp &= ~ALIGN_SIZE;
2577  skb->data = (void *) (unsigned long)tmp;
2578  skb_reset_tail_pointer(skb);
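 /*
  * The pointer arithmetic above rounds skb->data up to the next
  * (ALIGN_SIZE + 1)-byte boundary; this works because ALIGN_SIZE is of
  * the form 2^n - 1 (presumably 127 here, giving the 128-byte
  * alignment that 2 buffer mode expects).
  */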
2579 
2580  if (from_card_up) {
2581  rxdp3->Buffer0_ptr =
2582  pci_map_single(ring->pdev, ba->ba_0,
2583  BUF0_LEN,
2584  PCI_DMA_FROMDEVICE);
2585  if (pci_dma_mapping_error(nic->pdev,
2586  rxdp3->Buffer0_ptr))
2587  goto pci_map_failed;
2588  } else
2589  pci_dma_sync_single_for_device(ring->pdev,
2590  (dma_addr_t)rxdp3->Buffer0_ptr,
2591  BUF0_LEN,
2592  PCI_DMA_FROMDEVICE);
2593 
2594  rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2595  if (ring->rxd_mode == RXD_MODE_3B) {
2596  /* Two buffer mode */
2597 
2598  /*
2599  * Buffer2 will have L3/L4 header plus
2600  * L4 payload
2601  */
2602  rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2603  skb->data,
2604  ring->mtu + 4,
2605  PCI_DMA_FROMDEVICE);
2606 
2607  if (pci_dma_mapping_error(nic->pdev,
2608  rxdp3->Buffer2_ptr))
2609  goto pci_map_failed;
2610 
2611  if (from_card_up) {
2612  rxdp3->Buffer1_ptr =
2613  pci_map_single(ring->pdev,
2614  ba->ba_1,
2615  BUF1_LEN,
2616  PCI_DMA_FROMDEVICE);
2617 
2618  if (pci_dma_mapping_error(nic->pdev,
2619  rxdp3->Buffer1_ptr)) {
2620  pci_unmap_single(ring->pdev,
2621  (dma_addr_t)(unsigned long)
2622  skb->data,
2623  ring->mtu + 4,
2624  PCI_DMA_FROMDEVICE);
2625  goto pci_map_failed;
2626  }
2627  }
2628  rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2629  rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2630  (ring->mtu + 4);
2631  }
2632  rxdp->Control_2 |= s2BIT(0);
2633  rxdp->Host_Control = (unsigned long) (skb);
2634  }
2635  if (alloc_tab & ((1 << rxsync_frequency) - 1))
2636  rxdp->Control_1 |= RXD_OWN_XENA;
2637  off++;
2638  if (off == (ring->rxd_count + 1))
2639  off = 0;
2640  ring->rx_curr_put_info.offset = off;
2641 
2642  rxdp->Control_2 |= SET_RXD_MARKER;
2643  if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2644  if (first_rxdp) {
2645  wmb();
2646  first_rxdp->Control_1 |= RXD_OWN_XENA;
2647  }
2648  first_rxdp = rxdp;
2649  }
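 /*
  * Ownership is handed to the NIC immediately for most descriptors;
  * every 2^rxsync_frequency-th descriptor is instead remembered in
  * first_rxdp and flipped only after a wmb(), so the adapter never
  * sees a descriptor before all of its fields are visible.
  */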
2650  ring->rx_bufs_left += 1;
2651  alloc_tab++;
2652  }
2653 
2654 end:
2655  /* Transfer ownership of first descriptor to adapter just before
2656  * exiting. Before that, use memory barrier so that ownership
2657  * and other fields are seen by adapter correctly.
2658  */
2659  if (first_rxdp) {
2660  wmb();
2661  first_rxdp->Control_1 |= RXD_OWN_XENA;
2662  }
2663 
2664  return SUCCESS;
2665 
2666 pci_map_failed:
2667  swstats->pci_map_fail_cnt++;
2668  swstats->mem_freed += skb->truesize;
2669  dev_kfree_skb_irq(skb);
2670  return -ENOMEM;
2671 }
2672 
2673 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2674 {
2675  struct net_device *dev = sp->dev;
2676  int j;
2677  struct sk_buff *skb;
2678  struct RxD_t *rxdp;
2679  struct RxD1 *rxdp1;
2680  struct RxD3 *rxdp3;
2681  struct mac_info *mac_control = &sp->mac_control;
2682  struct stat_block *stats = mac_control->stats_info;
2683  struct swStat *swstats = &stats->sw_stat;
2684 
2685  for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2686  rxdp = mac_control->rings[ring_no].
2687  rx_blocks[blk].rxds[j].virt_addr;
2688  skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2689  if (!skb)
2690  continue;
2691  if (sp->rxd_mode == RXD_MODE_1) {
2692  rxdp1 = (struct RxD1 *)rxdp;
2693  pci_unmap_single(sp->pdev,
2694  (dma_addr_t)rxdp1->Buffer0_ptr,
2695  dev->mtu +
2696  HEADER_ETHERNET_II_802_3_SIZE +
2697  HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2698  PCI_DMA_FROMDEVICE);
2699  memset(rxdp, 0, sizeof(struct RxD1));
2700  } else if (sp->rxd_mode == RXD_MODE_3B) {
2701  rxdp3 = (struct RxD3 *)rxdp;
2702  pci_unmap_single(sp->pdev,
2703  (dma_addr_t)rxdp3->Buffer0_ptr,
2704  BUF0_LEN,
2705  PCI_DMA_FROMDEVICE);
2706  pci_unmap_single(sp->pdev,
2707  (dma_addr_t)rxdp3->Buffer1_ptr,
2708  BUF1_LEN,
2709  PCI_DMA_FROMDEVICE);
2710  pci_unmap_single(sp->pdev,
2711  (dma_addr_t)rxdp3->Buffer2_ptr,
2712  dev->mtu + 4,
2713  PCI_DMA_FROMDEVICE);
2714  memset(rxdp, 0, sizeof(struct RxD3));
2715  }
2716  swstats->mem_freed += skb->truesize;
2717  dev_kfree_skb(skb);
2718  mac_control->rings[ring_no].rx_bufs_left -= 1;
2719  }
2720 }
2721 
2722 /**
2723  * free_rx_buffers - Frees all Rx buffers
2724  * @sp : device private variable.
2725  * Description:
2726  * This function will free all Rx buffers allocated by host.
2727  * Return Value:
2728  * NONE.
2729  */
2730 
2731 static void free_rx_buffers(struct s2io_nic *sp)
2732 {
2733  struct net_device *dev = sp->dev;
2734  int i, blk = 0, buf_cnt = 0;
2735  struct config_param *config = &sp->config;
2736  struct mac_info *mac_control = &sp->mac_control;
2737 
2738  for (i = 0; i < config->rx_ring_num; i++) {
2739  struct ring_info *ring = &mac_control->rings[i];
2740 
2741  for (blk = 0; blk < rx_ring_sz[i]; blk++)
2742  free_rxd_blk(sp, i, blk);
2743 
2744  ring->rx_curr_put_info.block_index = 0;
2745  ring->rx_curr_get_info.block_index = 0;
2746  ring->rx_curr_put_info.offset = 0;
2747  ring->rx_curr_get_info.offset = 0;
2748  ring->rx_bufs_left = 0;
2749  DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2750  dev->name, buf_cnt, i);
2751  }
2752 }
2753 
2754 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2755 {
2756  if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2757  DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2758  ring->dev->name);
2759  }
2760  return 0;
2761 }
2762 
2763 /**
2764  * s2io_poll_msix - Rx interrupt handler for NAPI support
2765  * @napi : pointer to the napi structure.
2766  * @budget : The number of packets that were budgeted to be processed
2767  * during one pass through the 'Poll' function.
2768  * Description:
2769  * Comes into picture only if NAPI support has been incorporated. It does
2770  * the same thing that rx_intr_handler does, but not in an interrupt
2771  * context, and it will process only a given number of packets.
2772  * Return value:
2773  * No. of napi packets processed.
2774  */
2775 
2776 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2777 {
2778  struct ring_info *ring = container_of(napi, struct ring_info, napi);
2779  struct net_device *dev = ring->dev;
2780  int pkts_processed = 0;
2781  u8 __iomem *addr = NULL;
2782  u8 val8 = 0;
2783  struct s2io_nic *nic = netdev_priv(dev);
2784  struct XENA_dev_config __iomem *bar0 = nic->bar0;
2785  int budget_org = budget;
2786 
2787  if (unlikely(!is_s2io_card_up(nic)))
2788  return 0;
2789 
2790  pkts_processed = rx_intr_handler(ring, budget);
2791  s2io_chk_rx_buffers(nic, ring);
2792 
2793  if (pkts_processed < budget_org) {
2794  napi_complete(napi);
2795  /*Re Enable MSI-Rx Vector*/
2796  addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2797  addr += 7 - ring->ring_no;
2798  val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2799  writeb(val8, addr);
2800  val8 = readb(addr);
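 /* read back to flush the posted byte write that unmasks the vector */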
2801  }
2802  return pkts_processed;
2803 }
2804 
2805 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2806 {
2807  struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2808  int pkts_processed = 0;
2809  int ring_pkts_processed, i;
2810  struct XENA_dev_config __iomem *bar0 = nic->bar0;
2811  int budget_org = budget;
2812  struct config_param *config = &nic->config;
2813  struct mac_info *mac_control = &nic->mac_control;
2814 
2815  if (unlikely(!is_s2io_card_up(nic)))
2816  return 0;
2817 
2818  for (i = 0; i < config->rx_ring_num; i++) {
2819  struct ring_info *ring = &mac_control->rings[i];
2820  ring_pkts_processed = rx_intr_handler(ring, budget);
2821  s2io_chk_rx_buffers(nic, ring);
2822  pkts_processed += ring_pkts_processed;
2823  budget -= ring_pkts_processed;
2824  if (budget <= 0)
2825  break;
2826  }
2827  if (pkts_processed < budget_org) {
2828  napi_complete(napi);
2829  /* Re enable the Rx interrupts for the ring */
2830  writeq(0, &bar0->rx_traffic_mask);
2831  readl(&bar0->rx_traffic_mask);
2832  }
2833  return pkts_processed;
2834 }
2835 
2836 #ifdef CONFIG_NET_POLL_CONTROLLER
2837 
2838 /**
2839  * s2io_netpoll - netpoll event handler entry point
2840  * @dev : pointer to the device structure.
2841  * Description:
2842  * This function will be called by upper layer to check for events on the
2843  * interface in situations where interrupts are disabled. It is used for
2844  * specific in-kernel networking tasks like remote consoles and netdump.
2845  */
2846 static void s2io_netpoll(struct net_device *dev)
2847 {
2848  struct s2io_nic *nic = netdev_priv(dev);
2849  const int irq = nic->pdev->irq;
2850  struct XENA_dev_config __iomem *bar0 = nic->bar0;
2851  u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2852  int i;
2853  struct config_param *config = &nic->config;
2854  struct mac_info *mac_control = &nic->mac_control;
2855 
2856  if (pci_channel_offline(nic->pdev))
2857  return;
2858 
2859  disable_irq(irq);
2860 
2861  writeq(val64, &bar0->rx_traffic_int);
2862  writeq(val64, &bar0->tx_traffic_int);
2863 
2864  /*
2865  * We need to free the transmitted skbs, or else netpoll will run
2866  * out of skbs and netpoll applications such as netdump will fail.
2867  */
2868  for (i = 0; i < config->tx_fifo_num; i++)
2869  tx_intr_handler(&mac_control->fifos[i]);
2870 
2871  /* check for received packet and indicate up to network */
2872  for (i = 0; i < config->rx_ring_num; i++) {
2873  struct ring_info *ring = &mac_control->rings[i];
2874 
2875  rx_intr_handler(ring, 0);
2876  }
2877 
2878  for (i = 0; i < config->rx_ring_num; i++) {
2879  struct ring_info *ring = &mac_control->rings[i];
2880 
2881  if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2882  DBG_PRINT(INFO_DBG,
2883  "%s: Out of memory in Rx Netpoll!!\n",
2884  dev->name);
2885  break;
2886  }
2887  }
2888  enable_irq(irq);
2889 }
2890 #endif
2891 
2892 /**
2893  * rx_intr_handler - Rx interrupt handler
2894  * @ring_data : per ring structure.
2895  * @budget : budget for napi processing.
2896  * Description:
2897  * If the interrupt is because of a received frame or if the
2898  * receive ring contains fresh as yet un-processed frames, this
2899  * function is called. It picks out the RxD at which place the last
2900  * Rx processing had stopped and sends the skb to the OSM's Rx handler
2901  * and then increments the offset.
2902  * Return Value:
2903  * No. of napi packets processed.
2904  */
2905 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2906 {
2907  int get_block, put_block;
2908  struct rx_curr_get_info get_info, put_info;
2909  struct RxD_t *rxdp;
2910  struct sk_buff *skb;
2911  int pkt_cnt = 0, napi_pkts = 0;
2912  int i;
2913  struct RxD1 *rxdp1;
2914  struct RxD3 *rxdp3;
2915 
2916  get_info = ring_data->rx_curr_get_info;
2917  get_block = get_info.block_index;
2918  memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2919  put_block = put_info.block_index;
2920  rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2921 
2922  while (RXD_IS_UP2DT(rxdp)) {
2923  /*
2924  * If you are next to the put index then it's a
2925  * FIFO full condition.
2926  */
2927  if ((get_block == put_block) &&
2928  (get_info.offset + 1) == put_info.offset) {
2929  DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2930  ring_data->dev->name);
2931  break;
2932  }
2933  skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2934  if (skb == NULL) {
2935  DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2936  ring_data->dev->name);
2937  return 0;
2938  }
2939  if (ring_data->rxd_mode == RXD_MODE_1) {
2940  rxdp1 = (struct RxD1 *)rxdp;
2941  pci_unmap_single(ring_data->pdev, (dma_addr_t)
2942  rxdp1->Buffer0_ptr,
2943  ring_data->mtu +
2944  HEADER_ETHERNET_II_802_3_SIZE +
2945  HEADER_802_2_SIZE +
2946  HEADER_SNAP_SIZE,
2947  PCI_DMA_FROMDEVICE);
2948  } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2949  rxdp3 = (struct RxD3 *)rxdp;
2950  pci_dma_sync_single_for_cpu(ring_data->pdev,
2951  (dma_addr_t)rxdp3->Buffer0_ptr,
2952  BUF0_LEN,
2953  PCI_DMA_FROMDEVICE);
2954  pci_unmap_single(ring_data->pdev,
2955  (dma_addr_t)rxdp3->Buffer2_ptr,
2956  ring_data->mtu + 4,
2957  PCI_DMA_FROMDEVICE);
2958  }
2959  prefetch(skb->data);
2960  rx_osm_handler(ring_data, rxdp);
2961  get_info.offset++;
2962  ring_data->rx_curr_get_info.offset = get_info.offset;
2963  rxdp = ring_data->rx_blocks[get_block].
2964  rxds[get_info.offset].virt_addr;
2965  if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2966  get_info.offset = 0;
2967  ring_data->rx_curr_get_info.offset = get_info.offset;
2968  get_block++;
2969  if (get_block == ring_data->block_count)
2970  get_block = 0;
2971  ring_data->rx_curr_get_info.block_index = get_block;
2972  rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2973  }
2974 
2975  if (ring_data->nic->config.napi) {
2976  budget--;
2977  napi_pkts++;
2978  if (!budget)
2979  break;
2980  }
2981  pkt_cnt++;
2982  if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2983  break;
2984  }
2985  if (ring_data->lro) {
2986  /* Clear all LRO sessions before exiting */
2987  for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2988  struct lro *lro = &ring_data->lro0_n[i];
2989  if (lro->in_use) {
2990  update_L3L4_header(ring_data->nic, lro);
2991  queue_rx_frame(lro->parent, lro->vlan_tag);
2992  clear_lro_session(lro);
2993  }
2994  }
2995  }
2996  return napi_pkts;
2997 }
2998 
2999 /**
3000  * tx_intr_handler - Transmit interrupt handler
3001  * @fifo_data : fifo data pointer
3002  * Description:
3003  * If an interrupt was raised to indicate DMA complete of the
3004  * Tx packet, this function is called. It identifies the last TxD
3005  * whose buffer was freed and frees all skbs whose data have already
3006  * DMA'ed into the NICs internal memory.
3007  * Return Value:
3008  * NONE
3009  */
3010 
3011 static void tx_intr_handler(struct fifo_info *fifo_data)
3012 {
3013  struct s2io_nic *nic = fifo_data->nic;
3014  struct tx_curr_get_info get_info, put_info;
3015  struct sk_buff *skb = NULL;
3016  struct TxD *txdlp;
3017  int pkt_cnt = 0;
3018  unsigned long flags = 0;
3019  u8 err_mask;
3020  struct stat_block *stats = nic->mac_control.stats_info;
3021  struct swStat *swstats = &stats->sw_stat;
3022 
3023  if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3024  return;
3025 
3026  get_info = fifo_data->tx_curr_get_info;
3027  memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3028  txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3029  while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3030  (get_info.offset != put_info.offset) &&
3031  (txdlp->Host_Control)) {
3032  /* Check for TxD errors */
3033  if (txdlp->Control_1 & TXD_T_CODE) {
3034  unsigned long long err;
3035  err = txdlp->Control_1 & TXD_T_CODE;
3036  if (err & 0x1) {
3037  swstats->parity_err_cnt++;
3038  }
3039 
3040  /* update t_code statistics */
3041  err_mask = err >> 48;
3042  switch (err_mask) {
3043  case 2:
3044  swstats->tx_buf_abort_cnt++;
3045  break;
3046 
3047  case 3:
3048  swstats->tx_desc_abort_cnt++;
3049  break;
3050 
3051  case 7:
3052  swstats->tx_parity_err_cnt++;
3053  break;
3054 
3055  case 10:
3056  swstats->tx_link_loss_cnt++;
3057  break;
3058 
3059  case 15:
3060  swstats->tx_list_proc_err_cnt++;
3061  break;
3062  }
3063  }
3064 
3065  skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3066  if (skb == NULL) {
3067  spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3068  DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3069  __func__);
3070  return;
3071  }
3072  pkt_cnt++;
3073 
3074  /* Updating the statistics block */
3075  swstats->mem_freed += skb->truesize;
3076  dev_kfree_skb_irq(skb);
3077 
3078  get_info.offset++;
3079  if (get_info.offset == get_info.fifo_len + 1)
3080  get_info.offset = 0;
3081  txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3082  fifo_data->tx_curr_get_info.offset = get_info.offset;
3083  }
3084 
3085  s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3086 
3087  spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3088 }
3089 
3090 /**
3091  * s2io_mdio_write - Function to write in to MDIO registers
3092  * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3093  * @addr : address value
3094  * @value : data value
3095  * @dev : pointer to net_device structure
3096  * Description:
3097  * This function is used to write values to the MDIO registers.
3098  * Return Value: NONE
3099  */
3100 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3101  struct net_device *dev)
3102 {
3103  u64 val64;
3104  struct s2io_nic *sp = netdev_priv(dev);
3105  struct XENA_dev_config __iomem *bar0 = sp->bar0;
3106 
3107  /* address transaction */
3108  val64 = MDIO_MMD_INDX_ADDR(addr) |
3109  MDIO_MMD_DEV_ADDR(mmd_type) |
3110  MDIO_MMS_PRT_ADDR(0x0);
3111  writeq(val64, &bar0->mdio_control);
3112  val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3113  writeq(val64, &bar0->mdio_control);
3114  udelay(100);
3115 
3116  /* Data transaction */
3117  val64 = MDIO_MMD_INDX_ADDR(addr) |
3118  MDIO_MMD_DEV_ADDR(mmd_type) |
3119  MDIO_MMS_PRT_ADDR(0x0) |
3120  MDIO_MDIO_DATA(value) |
3121  MDIO_OP(MDIO_OP_WRITE_TRANS);
3122  writeq(val64, &bar0->mdio_control);
3123  val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3124  writeq(val64, &bar0->mdio_control);
3125  udelay(100);
3126 
3127  val64 = MDIO_MMD_INDX_ADDR(addr) |
3128  MDIO_MMD_DEV_ADDR(mmd_type) |
3129  MDIO_MMS_PRT_ADDR(0x0) |
3130  MDIO_OP(MDIO_OP_READ_TRANS);
3131  writeq(val64, &bar0->mdio_control);
3132  val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3133  writeq(val64, &bar0->mdio_control);
3134  udelay(100);
3135 }
3136 
3137 /**
3138  * s2io_mdio_read - Function to read from the MDIO registers
3139  * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3140  * @addr : address value
3141  * @dev : pointer to net_device structure
3142  * Description:
3143  * This function is used to read values from the MDIO registers.
3144  * Return Value: The value read from the MDIO register.
3145  */
3146 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3147 {
3148  u64 val64 = 0x0;
3149  u64 rval64 = 0x0;
3150  struct s2io_nic *sp = netdev_priv(dev);
3151  struct XENA_dev_config __iomem *bar0 = sp->bar0;
3152 
3153  /* address transaction */
3154  val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3155  | MDIO_MMD_DEV_ADDR(mmd_type)
3156  | MDIO_MMS_PRT_ADDR(0x0));
3157  writeq(val64, &bar0->mdio_control);
3158  val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3159  writeq(val64, &bar0->mdio_control);
3160  udelay(100);
3161 
3162  /* Data transaction */
3163  val64 = MDIO_MMD_INDX_ADDR(addr) |
3164  MDIO_MMD_DEV_ADDR(mmd_type) |
3165  MDIO_MMS_PRT_ADDR(0x0) |
3166  MDIO_OP(MDIO_OP_READ_TRANS);
3167  writeq(val64, &bar0->mdio_control);
3168  val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3169  writeq(val64, &bar0->mdio_control);
3170  udelay(100);
3171 
3172  /* Read the value from regs */
3173  rval64 = readq(&bar0->mdio_control);
3174  rval64 = rval64 & 0xFFFF0000;
3175  rval64 = rval64 >> 16;
3176  return rval64;
3177 }
3178 
3179 /**
3180  * s2io_chk_xpak_counter - Checks the status of the xpak counters
3181  * @counter : counter value to be updated
3182  * @regs_stat : packed 2-bit status fields, one per alarm
3183  * @index : index of this alarm's 2-bit field
3184  * @flag : flag to indicate the status
3185  * @type : counter type
3186  * Description: checks the status of the xpak counters value.
3187  * Return Value: NONE
3188  */
3189 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3190  u16 flag, u16 type)
3191 {
3192  u64 mask = 0x3;
3193  u64 val64;
3194  int i;
3195  for (i = 0; i < index; i++)
3196  mask = mask << 0x2;
3197 
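 /*
  * xpak_regs_stat packs a 2-bit counter per alarm; the shifts above
  * select this alarm's field. The counter is bumped while the alarm
  * persists and, on reaching 3, the warnings below are printed and
  * the field is reset.
  */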
3198  if (flag > 0) {
3199  *counter = *counter + 1;
3200  val64 = *regs_stat & mask;
3201  val64 = val64 >> (index * 0x2);
3202  val64 = val64 + 1;
3203  if (val64 == 3) {
3204  switch (type) {
3205  case 1:
3206  DBG_PRINT(ERR_DBG,
3207  "Take Xframe NIC out of service.\n");
3208  DBG_PRINT(ERR_DBG,
3209 "Excessive temperatures may result in premature transceiver failure.\n");
3210  break;
3211  case 2:
3212  DBG_PRINT(ERR_DBG,
3213  "Take Xframe NIC out of service.\n");
3214  DBG_PRINT(ERR_DBG,
3215 "Excessive bias currents may indicate imminent laser diode failure.\n");
3216  break;
3217  case 3:
3218  DBG_PRINT(ERR_DBG,
3219  "Take Xframe NIC out of service.\n");
3220  DBG_PRINT(ERR_DBG,
3221 "Excessive laser output power may saturate far-end receiver.\n");
3222  break;
3223  default:
3224  DBG_PRINT(ERR_DBG,
3225  "Incorrect XPAK Alarm type\n");
3226  }
3227  val64 = 0x0;
3228  }
3229  val64 = val64 << (index * 0x2);
3230  *regs_stat = (*regs_stat & (~mask)) | (val64);
3231 
3232  } else {
3233  *regs_stat = *regs_stat & (~mask);
3234  }
3235 }
3236 
3237 /**
3238  * s2io_updt_xpak_counter - Function to update the xpak counters
3239  * @dev : pointer to net_device struct
3240  * Description:
3241  * This function updates the status of the xpak counters.
3242  * Return Value: NONE
3243  */
3244 static void s2io_updt_xpak_counter(struct net_device *dev)
3245 {
3246  u16 flag = 0x0;
3247  u16 type = 0x0;
3248  u16 val16 = 0x0;
3249  u64 val64 = 0x0;
3250  u64 addr = 0x0;
3251 
3252  struct s2io_nic *sp = netdev_priv(dev);
3253  struct stat_block *stats = sp->mac_control.stats_info;
3254  struct xpakStat *xstats = &stats->xpak_stat;
3255 
3256  /* Check the communication with the MDIO slave */
3257  addr = MDIO_CTRL1;
3258  val64 = 0x0;
3259  val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3260  if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3261  DBG_PRINT(ERR_DBG,
3262  "ERR: MDIO slave access failed - Returned %llx\n",
3263  (unsigned long long)val64);
3264  return;
3265  }
3266 
3267  /* Check for the expected value of control reg 1 */
3268  if (val64 != MDIO_CTRL1_SPEED10G) {
3269  DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3270  "Returned: %llx- Expected: 0x%x\n",
3271  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3272  return;
3273  }
3274 
3275  /* Loading the DOM register to MDIO register */
3276  addr = 0xA100;
3277  s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3278  val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3279 
3280  /* Reading the Alarm flags */
3281  addr = 0xA070;
3282  val64 = 0x0;
3283  val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3284 
3285  flag = CHECKBIT(val64, 0x7);
3286  type = 1;
3287  s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3288  &xstats->xpak_regs_stat,
3289  0x0, flag, type);
3290 
3291  if (CHECKBIT(val64, 0x6))
3292  xstats->alarm_transceiver_temp_low++;
3293 
3294  flag = CHECKBIT(val64, 0x3);
3295  type = 2;
3296  s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3297  &xstats->xpak_regs_stat,
3298  0x2, flag, type);
3299 
3300  if (CHECKBIT(val64, 0x2))
3301  xstats->alarm_laser_bias_current_low++;
3302 
3303  flag = CHECKBIT(val64, 0x1);
3304  type = 3;
3305  s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3306  &xstats->xpak_regs_stat,
3307  0x4, flag, type);
3308 
3309  if (CHECKBIT(val64, 0x0))
3310  xstats->alarm_laser_output_power_low++;
3311 
3312  /* Reading the Warning flags */
3313  addr = 0xA074;
3314  val64 = 0x0;
3315  val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3316 
3317  if (CHECKBIT(val64, 0x7))
3318  xstats->warn_transceiver_temp_high++;
3319 
3320  if (CHECKBIT(val64, 0x6))
3321  xstats->warn_transceiver_temp_low++;
3322 
3323  if (CHECKBIT(val64, 0x3))
3324  xstats->warn_laser_bias_current_high++;
3325 
3326  if (CHECKBIT(val64, 0x2))
3327  xstats->warn_laser_bias_current_low++;
3328 
3329  if (CHECKBIT(val64, 0x1))
3330  xstats->warn_laser_output_power_high++;
3331 
3332  if (CHECKBIT(val64, 0x0))
3333  xstats->warn_laser_output_power_low++;
3334 }
3335 
3336 /**
3337  * wait_for_cmd_complete - waits for a command to complete.
3338  * @addr : address of the register to poll.
3339  * @busy_bit : bit that indicates the command is still busy.
3340  * @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for.
3341  * Description: Function that waits for a command to Write into RMAC
3342  * ADDR DATA registers to be completed and returns either success or
3343  * error depending on whether the command was complete or not.
3344  * Return value:
3345  * SUCCESS on success and FAILURE on failure.
3346  */
3347 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3348  int bit_state)
3349 {
3350  int ret = FAILURE, cnt = 0, delay = 1;
3351  u64 val64;
3352 
3353  if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3354  return FAILURE;
3355 
3356  do {
3357  val64 = readq(addr);
3358  if (bit_state == S2IO_BIT_RESET) {
3359  if (!(val64 & busy_bit)) {
3360  ret = SUCCESS;
3361  break;
3362  }
3363  } else {
3364  if (val64 & busy_bit) {
3365  ret = SUCCESS;
3366  break;
3367  }
3368  }
3369 
3370  if (in_interrupt())
3371  mdelay(delay);
3372  else
3373  msleep(delay);
3374 
3375  if (++cnt >= 10)
3376  delay = 50;
3377  } while (cnt < 20);
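 /*
  * The loop above polls with a 1 ms period for the first ten
  * iterations, then backs off to 50 ms, so a stuck command is
  * abandoned after roughly half a second.
  */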
3378  return ret;
3379 }
3380 /**
3381  * check_pci_device_id - Checks if the device id is supported
3382  * @id : device id
3383  * Description: Function to check if the pci device id is supported.
3384  * Return value: Actual device id if supported, else PCI_ANY_ID.
3385  */
3386 static u16 check_pci_device_id(u16 id)
3387 {
3388  switch (id) {
3389  case PCI_DEVICE_ID_HERC_WIN:
3390  case PCI_DEVICE_ID_HERC_UNI:
3391  return XFRAME_II_DEVICE;
3392  case PCI_DEVICE_ID_S2IO_UNI:
3393  case PCI_DEVICE_ID_S2IO_WIN:
3394  return XFRAME_I_DEVICE;
3395  default:
3396  return PCI_ANY_ID;
3397  }
3398 }
3399 
3400 /**
3401  * s2io_reset - Resets the card.
3402  * @sp : private member of the device structure.
3403  * Description: Function to Reset the card. This function then also
3404  * restores the previously saved PCI configuration space registers as
3405  * the card reset also resets the configuration space.
3406  * Return value:
3407  * void.
3408  */
3409 
3410 static void s2io_reset(struct s2io_nic *sp)
3411 {
3412  struct XENA_dev_config __iomem *bar0 = sp->bar0;
3413  u64 val64;
3414  u16 subid, pci_cmd;
3415  int i;
3416  u16 val16;
3417  unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3418  unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3419  struct stat_block *stats;
3420  struct swStat *swstats;
3421 
3422  DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3423  __func__, pci_name(sp->pdev));
3424 
3425  /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3426  pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3427 
3428  val64 = SW_RESET_ALL;
3429  writeq(val64, &bar0->sw_reset);
3430  if (strstr(sp->product_name, "CX4"))
3431  msleep(750);
3432  msleep(250);
3433  for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3434 
3435  /* Restore the PCI state saved during initialization. */
3436  pci_restore_state(sp->pdev);
3437  pci_save_state(sp->pdev);
3438  pci_read_config_word(sp->pdev, 0x2, &val16);
3439  if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3440  break;
3441  msleep(200);
3442  }
3443 
3444  if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3445  DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3446 
3447  pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3448 
3449  s2io_init_pci(sp);
3450 
3451  /* Set swapper to enable I/O register access */
3452  s2io_set_swapper(sp);
3453 
3454  /* restore mac_addr entries */
3455  do_s2io_restore_unicast_mc(sp);
3456 
3457  /* Restore the MSIX table entries from local variables */
3458  restore_xmsi_data(sp);
3459 
3460  /* Clear certain PCI/PCI-X fields after reset */
3461  if (sp->device_type == XFRAME_II_DEVICE) {
3462  /* Clear "detected parity error" bit */
3463  pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3464 
3465  /* Clearing PCIX Ecc status register */
3466  pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3467 
3468  /* Clearing PCI_STATUS error reflected here */
3469  writeq(s2BIT(62), &bar0->txpic_int_reg);
3470  }
3471 
3472  /* Reset device statistics maintained by OS */
3473  memset(&sp->stats, 0, sizeof(struct net_device_stats));
3474 
3475  stats = sp->mac_control.stats_info;
3476  swstats = &stats->sw_stat;
3477 
3478  /* save link up/down time/cnt, reset/memory/watchdog cnt */
3479  up_cnt = swstats->link_up_cnt;
3480  down_cnt = swstats->link_down_cnt;
3481  up_time = swstats->link_up_time;
3482  down_time = swstats->link_down_time;
3483  reset_cnt = swstats->soft_reset_cnt;
3484  mem_alloc_cnt = swstats->mem_allocated;
3485  mem_free_cnt = swstats->mem_freed;
3486  watchdog_cnt = swstats->watchdog_timer_cnt;
3487 
3488  memset(stats, 0, sizeof(struct stat_block));
3489 
3490  /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3491  swstats->link_up_cnt = up_cnt;
3492  swstats->link_down_cnt = down_cnt;
3493  swstats->link_up_time = up_time;
3494  swstats->link_down_time = down_time;
3495  swstats->soft_reset_cnt = reset_cnt;
3496  swstats->mem_allocated = mem_alloc_cnt;
3497  swstats->mem_freed = mem_free_cnt;
3498  swstats->watchdog_timer_cnt = watchdog_cnt;
3499 
3500  /* SXE-002: Configure link and activity LED to turn it off */
3501  subid = sp->pdev->subsystem_device;
3502  if (((subid & 0xFF) >= 0x07) &&
3503  (sp->device_type == XFRAME_I_DEVICE)) {
3504  val64 = readq(&bar0->gpio_control);
3505  val64 |= 0x0000800000000000ULL;
3506  writeq(val64, &bar0->gpio_control);
3507  val64 = 0x0411040400000000ULL;
3508  writeq(val64, (void __iomem *)bar0 + 0x2700);
3509  }
3510 
3511  /*
3512  * Clear spurious ECC interrupts that would have occurred on
3513  * XFRAME II cards after reset.
3514  */
3515  if (sp->device_type == XFRAME_II_DEVICE) {
3516  val64 = readq(&bar0->pcc_err_reg);
3517  writeq(val64, &bar0->pcc_err_reg);
3518  }
3519 
3520  sp->device_enabled_once = false;
3521 }
3522 
3523 /**
3524  * s2io_set_swapper - swapper settings for the NIC.
3525  * @sp : private member of the device structure, pointer to the
3526  * s2io_nic structure.
3527  * Description: Function to set the swapper registers for the device
3528  * appropriately depending on the 'endianness' of the system.
3529  * Return value:
3530  * SUCCESS on success and FAILURE on failure.
3531  */
3532 
3533 static int s2io_set_swapper(struct s2io_nic *sp)
3534 {
3535  struct net_device *dev = sp->dev;
3536  struct XENA_dev_config __iomem *bar0 = sp->bar0;
3537  u64 val64, valt, valr;
3538 
3539  /*
3540  * Set proper endian settings and verify the same by reading
3541  * the PIF Feed-back register.
3542  */
3543 
3544  val64 = readq(&bar0->pif_rd_swapper_fb);
3545  if (val64 != 0x0123456789ABCDEFULL) {
3546  int i = 0;
3547  static const u64 value[] = {
3548  0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3549  0x8100008181000081ULL, /* FE=1, SE=0 */
3550  0x4200004242000042ULL, /* FE=0, SE=1 */
3551  0 /* FE=0, SE=0 */
3552  };
3553 
3554  while (i < 4) {
3555  writeq(value[i], &bar0->swapper_ctrl);
3556  val64 = readq(&bar0->pif_rd_swapper_fb);
3557  if (val64 == 0x0123456789ABCDEFULL)
3558  break;
3559  i++;
3560  }
3561  if (i == 4) {
3562  DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3563  "feedback read %llx\n",
3564  dev->name, (unsigned long long)val64);
3565  return FAILURE;
3566  }
3567  valr = value[i];
3568  } else {
3569  valr = readq(&bar0->swapper_ctrl);
3570  }
3571 
3572  valt = 0x0123456789ABCDEFULL;
3573  writeq(valt, &bar0->xmsi_address);
3574  val64 = readq(&bar0->xmsi_address);
3575 
3576  if (val64 != valt) {
3577  int i = 0;
3578  static const u64 value[] = {
3579  0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3580  0x0081810000818100ULL, /* FE=1, SE=0 */
3581  0x0042420000424200ULL, /* FE=0, SE=1 */
3582  0 /* FE=0, SE=0 */
3583  };
3584 
3585  while (i < 4) {
3586  writeq((value[i] | valr), &bar0->swapper_ctrl);
3587  writeq(valt, &bar0->xmsi_address);
3588  val64 = readq(&bar0->xmsi_address);
3589  if (val64 == valt)
3590  break;
3591  i++;
3592  }
3593  if (i == 4) {
3594  unsigned long long x = val64;
3595  DBG_PRINT(ERR_DBG,
3596  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3597  return FAILURE;
3598  }
3599  }
3600  val64 = readq(&bar0->swapper_ctrl);
3601  val64 &= 0xFFFF000000000000ULL;
3602 
3603 #ifdef __BIG_ENDIAN
3604  /*
3605  * The device is by default set to a big endian format, so a
3606  * big endian driver need not set anything.
3607  */
3608  val64 |= (SWAPPER_CTRL_TXP_FE |
3609  SWAPPER_CTRL_TXP_SE |
3610  SWAPPER_CTRL_TXD_R_FE |
3611  SWAPPER_CTRL_TXD_W_FE |
3612  SWAPPER_CTRL_TXF_R_FE |
3613  SWAPPER_CTRL_RXD_R_FE |
3614  SWAPPER_CTRL_RXD_W_FE |
3615  SWAPPER_CTRL_RXF_W_FE |
3616  SWAPPER_CTRL_XMSI_FE |
3617  SWAPPER_CTRL_STATS_FE |
3618  SWAPPER_CTRL_STATS_SE);
3619  if (sp->config.intr_type == INTA)
3620  val64 |= SWAPPER_CTRL_XMSI_SE;
3621  writeq(val64, &bar0->swapper_ctrl);
3622 #else
3623  /*
3624  * Initially we enable all bits to make the device accessible
3625  * by the driver, then we selectively enable only those bits
3626  * that we want to set.
3627  */
3628  val64 |= (SWAPPER_CTRL_TXP_FE |
3629  SWAPPER_CTRL_TXP_SE |
3630  SWAPPER_CTRL_TXD_R_FE |
3631  SWAPPER_CTRL_TXD_R_SE |
3632  SWAPPER_CTRL_TXD_W_FE |
3633  SWAPPER_CTRL_TXD_W_SE |
3634  SWAPPER_CTRL_TXF_R_FE |
3635  SWAPPER_CTRL_RXD_R_FE |
3636  SWAPPER_CTRL_RXD_R_SE |
3637  SWAPPER_CTRL_RXD_W_FE |
3638  SWAPPER_CTRL_RXD_W_SE |
3639  SWAPPER_CTRL_RXF_W_FE |
3640  SWAPPER_CTRL_XMSI_FE |
3641  SWAPPER_CTRL_STATS_FE |
3642  SWAPPER_CTRL_STATS_SE);
3643  if (sp->config.intr_type == INTA)
3644  val64 |= SWAPPER_CTRL_XMSI_SE;
3645  writeq(val64, &bar0->swapper_ctrl);
3646 #endif
3647  val64 = readq(&bar0->swapper_ctrl);
3648 
3649  /*
3650  * Verifying if endian settings are accurate by reading a
3651  * feedback register.
3652  */
3653  val64 = readq(&bar0->pif_rd_swapper_fb);
3654  if (val64 != 0x0123456789ABCDEFULL) {
3655  /* Endian settings are incorrect, calls for another dekko. */
3656  DBG_PRINT(ERR_DBG,
3657  "%s: Endian settings are wrong, feedback read %llx\n",
3658  dev->name, (unsigned long long)val64);
3659  return FAILURE;
3660  }
3661 
3662  return SUCCESS;
3663 }
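 /*
  * Both probe loops above rely on the same trick: pif_rd_swapper_fb
  * returns the constant 0x0123456789ABCDEF once the swapper is
  * programmed correctly, so candidate swap settings are tried until
  * the readback matches that signature.
  */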
3664 
3665 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3666 {
3667  struct XENA_dev_config __iomem *bar0 = nic->bar0;
3668  u64 val64;
3669  int ret = 0, cnt = 0;
3670 
3671  do {
3672  val64 = readq(&bar0->xmsi_access);
3673  if (!(val64 & s2BIT(15)))
3674  break;
3675  mdelay(1);
3676  cnt++;
3677  } while (cnt < 5);
3678  if (cnt == 5) {
3679  DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3680  ret = 1;
3681  }
3682 
3683  return ret;
3684 }
3685 
3686 static void restore_xmsi_data(struct s2io_nic *nic)
3687 {
3688  struct XENA_dev_config __iomem *bar0 = nic->bar0;
3689  u64 val64;
3690  int i, msix_index;
3691 
3692  if (nic->device_type == XFRAME_I_DEVICE)
3693  return;
3694 
3695  for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3696  msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3697  writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3698  writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3699  val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3700  writeq(val64, &bar0->xmsi_access);
3701  if (wait_for_msix_trans(nic, msix_index)) {
3702  DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3703  __func__, msix_index);
3704  continue;
3705  }
3706  }
3707 }
3708 
3709 static void store_xmsi_data(struct s2io_nic *nic)
3710 {
3711  struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712  u64 val64, addr, data;
3713  int i, msix_index;
3714 
3715  if (nic->device_type == XFRAME_I_DEVICE)
3716  return;
3717 
3718  /* Store and display */
3719  for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3720  msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3721  val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3722  writeq(val64, &bar0->xmsi_access);
3723  if (wait_for_msix_trans(nic, msix_index)) {
3724  DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3725  __func__, msix_index);
3726  continue;
3727  }
3728  addr = readq(&bar0->xmsi_address);
3729  data = readq(&bar0->xmsi_data);
3730  if (addr && data) {
3731  nic->msix_info[i].addr = addr;
3732  nic->msix_info[i].data = data;
3733  }
3734  }
3735 }
3736 
3737 static int s2io_enable_msi_x(struct s2io_nic *nic)
3738 {
3739  struct XENA_dev_config __iomem *bar0 = nic->bar0;
3740  u64 rx_mat;
3741  u16 msi_control; /* Temp variable */
3742  int ret, i, j, msix_indx = 1;
3743  int size;
3744  struct stat_block *stats = nic->mac_control.stats_info;
3745  struct swStat *swstats = &stats->sw_stat;
3746 
3747  size = nic->num_entries * sizeof(struct msix_entry);
3748  nic->entries = kzalloc(size, GFP_KERNEL);
3749  if (!nic->entries) {
3750  DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3751  __func__);
3752  swstats->mem_alloc_fail_cnt++;
3753  return -ENOMEM;
3754  }
3755  swstats->mem_allocated += size;
3756 
3757  size = nic->num_entries * sizeof(struct s2io_msix_entry);
3758  nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3759  if (!nic->s2io_entries) {
3760  DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3761  __func__);
3762  swstats->mem_alloc_fail_cnt++;
3763  kfree(nic->entries);
3764  swstats->mem_freed
3765  += (nic->num_entries * sizeof(struct msix_entry));
3766  return -ENOMEM;
3767  }
3768  swstats->mem_allocated += size;
3769 
3770  nic->entries[0].entry = 0;
3771  nic->s2io_entries[0].entry = 0;
3772  nic->s2io_entries[0].in_use = MSIX_FLG;
3773  nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3774  nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3775 
3776  for (i = 1; i < nic->num_entries; i++) {
3777  nic->entries[i].entry = ((i - 1) * 8) + 1;
3778  nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3779  nic->s2io_entries[i].arg = NULL;
3780  nic->s2io_entries[i].in_use = 0;
3781  }
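 /*
  * Vector layout assumed by this driver: entry 0 carries the alarm
  * interrupt and each rx ring i uses MSI-X table entry (i - 1) * 8 + 1,
  * matching the msix_index arithmetic in store_xmsi_data() and
  * restore_xmsi_data() above.
  */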
3782 
3783  rx_mat = readq(&bar0->rx_mat);
3784  for (j = 0; j < nic->config.rx_ring_num; j++) {
3785  rx_mat |= RX_MAT_SET(j, msix_indx);
3786  nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3787  nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3788  nic->s2io_entries[j+1].in_use = MSIX_FLG;
3789  msix_indx += 8;
3790  }
3791  writeq(rx_mat, &bar0->rx_mat);
3792  readq(&bar0->rx_mat);
3793 
3794  ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3795  /* We fail init if error or we get less vectors than min required */
3796  if (ret) {
3797  DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3798  kfree(nic->entries);
3799  swstats->mem_freed += nic->num_entries *
3800  sizeof(struct msix_entry);
3801  kfree(nic->s2io_entries);
3802  swstats->mem_freed += nic->num_entries *
3803  sizeof(struct s2io_msix_entry);
3804  nic->entries = NULL;
3805  nic->s2io_entries = NULL;
3806  return -ENOMEM;
3807  }
3808 
3809  /*
3810  * To enable MSI-X, MSI also needs to be enabled, due to a bug
3811  * in the herc NIC. (Temp change, needs to be removed later)
3812  */
3813  pci_read_config_word(nic->pdev, 0x42, &msi_control);
3814  msi_control |= 0x1; /* Enable MSI */
3815  pci_write_config_word(nic->pdev, 0x42, msi_control);
3816 
3817  return 0;
3818 }
3819 
3820 /* Handle software interrupt used during MSI(X) test */
3821 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3822 {
3823  struct s2io_nic *sp = dev_id;
3824 
3825  sp->msi_detected = 1;
3826  wake_up(&sp->msi_wait);
3827 
3828  return IRQ_HANDLED;
3829 }
3830 
3831 /* Test interrupt path by forcing a software IRQ */
3832 static int s2io_test_msi(struct s2io_nic *sp)
3833 {
3834  struct pci_dev *pdev = sp->pdev;
3835  struct XENA_dev_config __iomem *bar0 = sp->bar0;
3836  int err;
3837  u64 val64, saved64;
3838 
3839  err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3840  sp->name, sp);
3841  if (err) {
3842  DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3843  sp->dev->name, pci_name(pdev), pdev->irq);
3844  return err;
3845  }
3846 
3847  init_waitqueue_head(&sp->msi_wait);
3848  sp->msi_detected = 0;
3849 
3850  saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3851  val64 |= SCHED_INT_CTRL_ONE_SHOT;
3852  val64 |= SCHED_INT_CTRL_TIMER_EN;
3853  val64 |= SCHED_INT_CTRL_INT2MSI(1);
3854  writeq(val64, &bar0->scheduled_int_ctrl);
3855 
3856  wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3857 
3858  if (!sp->msi_detected) {
3859  /* MSI(X) test failed, go back to INTx mode */
3860  DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3861  "using MSI(X) during test\n",
3862  sp->dev->name, pci_name(pdev));
3863 
3864  err = -EOPNOTSUPP;
3865  }
3866 
3867  free_irq(sp->entries[1].vector, sp);
3868 
3869  writeq(saved64, &bar0->scheduled_int_ctrl);
3870 
3871  return err;
3872 }
3873 
3874 static void remove_msix_isr(struct s2io_nic *sp)
3875 {
3876  int i;
3877  u16 msi_control;
3878 
3879  for (i = 0; i < sp->num_entries; i++) {
3880  if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3881  int vector = sp->entries[i].vector;
3882  void *arg = sp->s2io_entries[i].arg;
3883  free_irq(vector, arg);
3884  }
3885  }
3886 
3887  kfree(sp->entries);
3888  kfree(sp->s2io_entries);
3889  sp->entries = NULL;
3890  sp->s2io_entries = NULL;
3891 
3892  pci_read_config_word(sp->pdev, 0x42, &msi_control);
3893  msi_control &= 0xFFFE; /* Disable MSI */
3894  pci_write_config_word(sp->pdev, 0x42, msi_control);
3895 
3896  pci_disable_msix(sp->pdev);
3897 }
3898 
3899 static void remove_inta_isr(struct s2io_nic *sp)
3900 {
3901  free_irq(sp->pdev->irq, sp->dev);
3902 }
3903 
3904 /* ********************************************************* *
3905  * Functions defined below concern the OS part of the driver *
3906  * ********************************************************* */
3907 
3908 /**
3909  * s2io_open - open entry point of the driver
3910  * @dev : pointer to the device structure.
3911  * Description:
3912  * This function is the open entry point of the driver. It mainly calls a
3913  * function to allocate Rx buffers and inserts them into the buffer
3914  * descriptors and then enables the Rx part of the NIC.
3915  * Return value:
3916  * 0 on success and an appropriate (-)ve integer as defined in errno.h
3917  * file on failure.
3918  */
3919 
3920 static int s2io_open(struct net_device *dev)
3921 {
3922  struct s2io_nic *sp = netdev_priv(dev);
3923  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3924  int err = 0;
3925 
3926  /*
3927  * Make sure the link is off by default every time the
3928  * NIC is initialized.
3929  */
3930  netif_carrier_off(dev);
3931  sp->last_link_state = 0;
3932 
3933  /* Initialize H/W and enable interrupts */
3934  err = s2io_card_up(sp);
3935  if (err) {
3936  DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3937  dev->name);
3938  goto hw_init_failed;
3939  }
3940 
3941  if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3942  DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3943  s2io_card_down(sp);
3944  err = -ENODEV;
3945  goto hw_init_failed;
3946  }
3947  s2io_start_all_tx_queue(sp);
3948  return 0;
3949 
3950 hw_init_failed:
3951  if (sp->config.intr_type == MSI_X) {
3952  if (sp->entries) {
3953  kfree(sp->entries);
3954  swstats->mem_freed += sp->num_entries *
3955  sizeof(struct msix_entry);
3956  }
3957  if (sp->s2io_entries) {
3958  kfree(sp->s2io_entries);
3959  swstats->mem_freed += sp->num_entries *
3960  sizeof(struct s2io_msix_entry);
3961  }
3962  }
3963  return err;
3964 }
3965 
3966 /**
3967  * s2io_close - close entry point of the driver
3968  * @dev : device pointer.
3969  * Description:
3970  * This is the stop entry point of the driver. It needs to undo exactly
3971  * whatever was done by the open entry point, thus it's usually referred
3972  * to as the close function. Among other things this function mainly
3973  * stops the Rx side of the NIC and frees all the Rx buffers in the
3974  * Rx rings.
3975  * Return value:
3976  * 0 on success and an appropriate (-)ve integer as defined in errno.h
3977  * file on failure.
3978  */
3979 static int s2io_close(struct net_device *dev)
3980 {
3981  struct s2io_nic *sp = netdev_priv(dev);
3982  struct config_param *config = &sp->config;
3983  u64 tmp64;
3984  int offset;
3985 
3986  /* Return if the device is already closed *
3987  * Can happen when s2io_card_up failed in change_mtu *
3988  */
3989  if (!is_s2io_card_up(sp))
3990  return 0;
3991 
3992  s2io_stop_all_tx_queue(sp);
3993  /* delete all populated mac entries */
3994  for (offset = 1; offset < config->max_mc_addr; offset++) {
3995  tmp64 = do_s2io_read_unicast_mc(sp, offset);
3996  if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3997  do_s2io_delete_unicast_mc(sp, tmp64);
3998  }
3999 
4000  s2io_card_down(sp);
4001 
4002  return 0;
4003 }
4004 
4005 /**
4006  * s2io_xmit - Tx entry point of the driver
4007  * @skb : the socket buffer containing the Tx data.
4008  * @dev : device pointer.
4009  * Description:
4010  * This function is the Tx entry point of the driver. S2IO NIC supports
4011  * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4012  * NOTE: when device can't queue the pkt, just the trans_start variable
4013  * will not be updated.
4014  * Return value:
4015  * NETDEV_TX_OK on success.
4016  */
4017 
4018 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4019 {
4020  struct s2io_nic *sp = netdev_priv(dev);
4021  u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4022  register u64 val64;
4023  struct TxD *txdp;
4024  struct TxFIFO_element __iomem *tx_fifo;
4025  unsigned long flags = 0;
4026  u16 vlan_tag = 0;
4027  struct fifo_info *fifo = NULL;
4028  int do_spin_lock = 1;
4029  int offload_type;
4030  int enable_per_list_interrupt = 0;
4031  struct config_param *config = &sp->config;
4032  struct mac_info *mac_control = &sp->mac_control;
4033  struct stat_block *stats = mac_control->stats_info;
4034  struct swStat *swstats = &stats->sw_stat;
4035 
4036  DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4037 
4038  if (unlikely(skb->len <= 0)) {
4039  DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4040  dev_kfree_skb_any(skb);
4041  return NETDEV_TX_OK;
4042  }
4043 
4044  if (!is_s2io_card_up(sp)) {
4045  DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4046  dev->name);
4047  dev_kfree_skb(skb);
4048  return NETDEV_TX_OK;
4049  }
4050 
4051  queue = 0;
4052  if (vlan_tx_tag_present(skb))
4053  vlan_tag = vlan_tx_tag_get(skb);
4054  if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4055  if (skb->protocol == htons(ETH_P_IP)) {
4056  struct iphdr *ip;
4057  struct tcphdr *th;
4058  ip = ip_hdr(skb);
4059 
4060  if (!ip_is_fragment(ip)) {
4061  th = (struct tcphdr *)(((unsigned char *)ip) +
4062  ip->ihl*4);
4063 
4064  if (ip->protocol == IPPROTO_TCP) {
4065  queue_len = sp->total_tcp_fifos;
4066  queue = (ntohs(th->source) +
4067  ntohs(th->dest)) &
4068  sp->fifo_selector[queue_len - 1];
4069  if (queue >= queue_len)
4070  queue = queue_len - 1;
4071  } else if (ip->protocol == IPPROTO_UDP) {
4072  queue_len = sp->total_udp_fifos;
4073  queue = (ntohs(th->source) +
4074  ntohs(th->dest)) &
4075  sp->fifo_selector[queue_len - 1];
4076  if (queue >= queue_len)
4077  queue = queue_len - 1;
4078  queue += sp->udp_fifo_idx;
4079  if (skb->len > 1024)
4080  enable_per_list_interrupt = 1;
4081  do_spin_lock = 0;
4082  }
4083  }
4084  }
4085  } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4086  /* get fifo number based on skb->priority value */
4087  queue = config->fifo_mapping
4088  [skb->priority & (MAX_TX_FIFOS - 1)];
4089  fifo = &mac_control->fifos[queue];
4090 
4091  if (do_spin_lock)
4092  spin_lock_irqsave(&fifo->tx_lock, flags);
4093  else {
4094  if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4095  return NETDEV_TX_LOCKED;
4096  }
4097 
4098  if (sp->config.multiq) {
4099  if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4100  spin_unlock_irqrestore(&fifo->tx_lock, flags);
4101  return NETDEV_TX_BUSY;
4102  }
4103  } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4104  if (netif_queue_stopped(dev)) {
4105  spin_unlock_irqrestore(&fifo->tx_lock, flags);
4106  return NETDEV_TX_BUSY;
4107  }
4108  }
4109 
4110  put_off = (u16)fifo->tx_curr_put_info.offset;
4111  get_off = (u16)fifo->tx_curr_get_info.offset;
4112  txdp = fifo->list_info[put_off].list_virt_addr;
4113 
4114  queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4115  /* Avoid "put" pointer going beyond "get" pointer */
4116  if (txdp->Host_Control ||
4117  ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4118  DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4119  s2io_stop_tx_queue(sp, fifo->fifo_no);
4120  dev_kfree_skb(skb);
4121  spin_unlock_irqrestore(&fifo->tx_lock, flags);
4122  return NETDEV_TX_OK;
4123  }
4124 
4125  offload_type = s2io_offload_type(skb);
4126  if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4127  txdp->Control_1 |= TXD_TCP_LSO_EN;
4128  txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4129  }
4130  if (skb->ip_summed == CHECKSUM_PARTIAL) {
4131  txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4132  TXD_TX_CKO_TCP_EN |
4133  TXD_TX_CKO_UDP_EN);
4134  }
4135  txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4136  txdp->Control_1 |= TXD_LIST_OWN_XENA;
4137  txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4138  if (enable_per_list_interrupt)
4139  if (put_off & (queue_len >> 5))
4140  txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4141  if (vlan_tag) {
4142  txdp->Control_2 |= TXD_VLAN_ENABLE;
4143  txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4144  }
4145 
4146  frg_len = skb_headlen(skb);
4147  if (offload_type == SKB_GSO_UDP) {
4148  int ufo_size;
4149 
4150  ufo_size = s2io_udp_mss(skb);
4151  ufo_size &= ~7;
4152  txdp->Control_1 |= TXD_UFO_EN;
4153  txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4154  txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4155 #ifdef __BIG_ENDIAN
4156  /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4157  fifo->ufo_in_band_v[put_off] =
4158  (__force u64)skb_shinfo(skb)->ip6_frag_id;
4159 #else
4160  fifo->ufo_in_band_v[put_off] =
4161  (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4162 #endif
4163  txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4164  txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4165  fifo->ufo_in_band_v,
4166  sizeof(u64),
4167  PCI_DMA_TODEVICE);
4168  if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4169  goto pci_map_failed;
4170  txdp++;
4171  }
4172 
4173  txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4174  frg_len, PCI_DMA_TODEVICE);
4175  if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4176  goto pci_map_failed;
4177 
4178  txdp->Host_Control = (unsigned long)skb;
4179  txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4180  if (offload_type == SKB_GSO_UDP)
4181  txdp->Control_1 |= TXD_UFO_EN;
4182 
4183  frg_cnt = skb_shinfo(skb)->nr_frags;
4184  /* For fragmented SKB. */
4185  for (i = 0; i < frg_cnt; i++) {
4186  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4187  /* A '0' length fragment will be ignored */
4188  if (!skb_frag_size(frag))
4189  continue;
4190  txdp++;
4191  txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4192  frag, 0,
4193  skb_frag_size(frag),
4194  DMA_TO_DEVICE);
4195  txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4196  if (offload_type == SKB_GSO_UDP)
4197  txdp->Control_1 |= TXD_UFO_EN;
4198  }
4199  txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4200 
4201  if (offload_type == SKB_GSO_UDP)
4202  frg_cnt++; /* as Txd0 was used for inband header */
4203 
4204  tx_fifo = mac_control->tx_FIFO_start[queue];
4205  val64 = fifo->list_info[put_off].list_phy_addr;
4206  writeq(val64, &tx_fifo->TxDL_Pointer);
4207 
4208  val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4209  TX_FIFO_LIST_PRIORITY(0));
4210  if (offload_type)
4211  val64 |= TX_FIFO_SPECIAL_FUNC;
4212 
4213  writeq(val64, &tx_fifo->List_Control);
4214 
4215  mmiowb();
4216 
4217  put_off++;
4218  if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4219  put_off = 0;
4220  fifo->tx_curr_put_info.offset = put_off;
4221 
4222  /* Avoid "put" pointer going beyond "get" pointer */
4223  if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4224  swstats->fifo_full_cnt++;
4225  DBG_PRINT(TX_DBG,
4226  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4227  put_off, get_off);
4228  s2io_stop_tx_queue(sp, fifo->fifo_no);
4229  }
4230  swstats->mem_allocated += skb->truesize;
4231  spin_unlock_irqrestore(&fifo->tx_lock, flags);
4232 
4233  if (sp->config.intr_type == MSI_X)
4234  tx_intr_handler(fifo);
4235 
4236  return NETDEV_TX_OK;
4237 
4238 pci_map_failed:
4239  swstats->pci_map_fail_cnt++;
4240  s2io_stop_tx_queue(sp, fifo->fifo_no);
4241  swstats->mem_freed += skb->truesize;
4242  dev_kfree_skb(skb);
4243  spin_unlock_irqrestore(&fifo->tx_lock, flags);
4244  return NETDEV_TX_OK;
4245 }
4246 
4247 static void
4248 s2io_alarm_handle(unsigned long data)
4249 {
4250  struct s2io_nic *sp = (struct s2io_nic *)data;
4251  struct net_device *dev = sp->dev;
4252 
4253  s2io_handle_errors(dev);
4254  mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4255 }
4256 
4257 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4258 {
4259  struct ring_info *ring = (struct ring_info *)dev_id;
4260  struct s2io_nic *sp = ring->nic;
4261  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4262 
4263  if (unlikely(!is_s2io_card_up(sp)))
4264  return IRQ_HANDLED;
4265 
4266  if (sp->config.napi) {
4267  u8 __iomem *addr = NULL;
4268  u8 val8 = 0;
4269 
4270  addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4271  addr += (7 - ring->ring_no);
4272  val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4273  writeb(val8, addr);
4274  val8 = readb(addr);
4275  napi_schedule(&ring->napi);
4276  } else {
4277  rx_intr_handler(ring, 0);
4278  s2io_chk_rx_buffers(sp, ring);
4279  }
4280 
4281  return IRQ_HANDLED;
4282 }
4283 
4284 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4285 {
4286  int i;
4287  struct fifo_info *fifos = (struct fifo_info *)dev_id;
4288  struct s2io_nic *sp = fifos->nic;
4289  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4290  struct config_param *config = &sp->config;
4291  u64 reason;
4292 
4293  if (unlikely(!is_s2io_card_up(sp)))
4294  return IRQ_NONE;
4295 
4296  reason = readq(&bar0->general_int_status);
4297  if (unlikely(reason == S2IO_MINUS_ONE))
4298  /* Nothing much can be done. Get out */
4299  return IRQ_HANDLED;
4300 
4301  if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4302  writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4303 
4304  if (reason & GEN_INTR_TXPIC)
4305  s2io_txpic_intr_handle(sp);
4306 
4307  if (reason & GEN_INTR_TXTRAFFIC)
4308  writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4309 
4310  for (i = 0; i < config->tx_fifo_num; i++)
4311  tx_intr_handler(&fifos[i]);
4312 
4313  writeq(sp->general_int_mask, &bar0->general_int_mask);
4314  readl(&bar0->general_int_status);
4315  return IRQ_HANDLED;
4316  }
4317  /* The interrupt was not raised by us */
4318  return IRQ_NONE;
4319 }
4320 
4321 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4322 {
4323  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4324  u64 val64;
4325 
4326  val64 = readq(&bar0->pic_int_status);
4327  if (val64 & PIC_INT_GPIO) {
4328  val64 = readq(&bar0->gpio_int_reg);
4329  if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4330  (val64 & GPIO_INT_REG_LINK_UP)) {
4331  /*
4332  * This is an unstable state, so clear both the up and down
4333  * interrupts and let the adapter re-evaluate the link state.
4334  */
4335  val64 |= GPIO_INT_REG_LINK_DOWN;
4336  val64 |= GPIO_INT_REG_LINK_UP;
4337  writeq(val64, &bar0->gpio_int_reg);
4338  val64 = readq(&bar0->gpio_int_mask);
4339  val64 &= ~(GPIO_INT_MASK_LINK_UP |
4340  GPIO_INT_MASK_LINK_DOWN);
4341  writeq(val64, &bar0->gpio_int_mask);
4342  } else if (val64 & GPIO_INT_REG_LINK_UP) {
4343  val64 = readq(&bar0->adapter_status);
4344  /* Enable Adapter */
4345  val64 = readq(&bar0->adapter_control);
4346  val64 |= ADAPTER_CNTL_EN;
4347  writeq(val64, &bar0->adapter_control);
4348  val64 |= ADAPTER_LED_ON;
4349  writeq(val64, &bar0->adapter_control);
4350  if (!sp->device_enabled_once)
4351  sp->device_enabled_once = 1;
4352 
4353  s2io_link(sp, LINK_UP);
4354  /*
4355  * unmask link down interrupt and mask link-up
4356  * intr
4357  */
4358  val64 = readq(&bar0->gpio_int_mask);
4359  val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4360  val64 |= GPIO_INT_MASK_LINK_UP;
4361  writeq(val64, &bar0->gpio_int_mask);
4362 
4363  } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4364  val64 = readq(&bar0->adapter_status);
4365  s2io_link(sp, LINK_DOWN);
4366  /* Link is down, so unmask the link-up interrupt */
4367  val64 = readq(&bar0->gpio_int_mask);
4368  val64 &= ~GPIO_INT_MASK_LINK_UP;
4369  val64 |= GPIO_INT_MASK_LINK_DOWN;
4370  writeq(val64, &bar0->gpio_int_mask);
4371 
4372  /* turn off LED */
4373  val64 = readq(&bar0->adapter_control);
4374  val64 = val64 & (~ADAPTER_LED_ON);
4375  writeq(val64, &bar0->adapter_control);
4376  }
4377  }
4378  val64 = readq(&bar0->gpio_int_mask);
4379 }
4380 
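4381 /**
4382  * do_s2io_chk_alarm_bit - check and acknowledge an alarm bit
4383  * @value: bit mask of the alarm condition(s) to test for.
4384  * @addr: address of the alarm register to be read and cleared.
4385  * @cnt: counter to be incremented when an alarm bit is set.
4386  * Description:
4387  * Reads the alarm register, writes the value back to clear the latched
4388  * alarms, and bumps the counter if any of the requested bits was set.
4389  * Return value: 1 if an alarm bit was set, 0 otherwise.
4390  */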
4391 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4392  unsigned long long *cnt)
4393 {
4394  u64 val64;
4395  val64 = readq(addr);
4396  if (val64 & value) {
4397  writeq(val64, addr);
4398  (*cnt)++;
4399  return 1;
4400  }
4401  return 0;
4402 
4403 }
4404 
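4405 /**
4406  * s2io_handle_errors - handle all device alarms and errors
4407  * @dev_id: pointer to the net_device of the adapter.
4408  * Description:
4409  * Called from the alarm timer; walks every alarm register of the
4410  * adapter, updates the error counters and schedules a soft reset
4411  * when a serious error is detected.
4412  */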
4413 static void s2io_handle_errors(void *dev_id)
4414 {
4415  struct net_device *dev = (struct net_device *)dev_id;
4416  struct s2io_nic *sp = netdev_priv(dev);
4417  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4418  u64 temp64 = 0, val64 = 0;
4419  int i = 0;
4420 
4421  struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4422  struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4423 
4424  if (!is_s2io_card_up(sp))
4425  return;
4426 
4427  if (pci_channel_offline(sp->pdev))
4428  return;
4429 
4430  memset(&sw_stat->ring_full_cnt, 0,
4431  sizeof(sw_stat->ring_full_cnt));
4432 
4433  /* Handling the XPAK counters update */
4434  if (stats->xpak_timer_count < 72000) {
4435  /* waiting for an hour */
4436  stats->xpak_timer_count++;
4437  } else {
4438  s2io_updt_xpak_counter(dev);
4439  /* reset the count to zero */
4440  stats->xpak_timer_count = 0;
4441  }
4442 
4443  /* Handling link status change error Intr */
4444  if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4445  val64 = readq(&bar0->mac_rmac_err_reg);
4446  writeq(val64, &bar0->mac_rmac_err_reg);
4447  if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4448  schedule_work(&sp->set_link_task);
4449  }
4450 
4451  /* In case of a serious error, the device will be Reset. */
4452  if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4453  &sw_stat->serious_err_cnt))
4454  goto reset;
4455 
4456  /* Check for data parity error */
4457  if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4458  &sw_stat->parity_err_cnt))
4459  goto reset;
4460 
4461  /* Check for ring full counter */
4462  if (sp->device_type == XFRAME_II_DEVICE) {
4463  val64 = readq(&bar0->ring_bump_counter1);
4464  for (i = 0; i < 4; i++) {
4465  temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4466  temp64 >>= 64 - ((i+1)*16);
4467  sw_stat->ring_full_cnt[i] += temp64;
4468  }
4469 
4470  val64 = readq(&bar0->ring_bump_counter2);
4471  for (i = 0; i < 4; i++) {
4472  temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4473  temp64 >>= 64 - ((i+1)*16);
4474  sw_stat->ring_full_cnt[i+4] += temp64;
4475  }
4476  }
4477 
4478  val64 = readq(&bar0->txdma_int_status);
4479  /* check for pfc_err */
4480  if (val64 & TXDMA_PFC_INT) {
4481  if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4482  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4483  PFC_PCIX_ERR,
4484  &bar0->pfc_err_reg,
4485  &sw_stat->pfc_err_cnt))
4486  goto reset;
4487  do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4488  &bar0->pfc_err_reg,
4489  &sw_stat->pfc_err_cnt);
4490  }
4491 
4492  /* check for tda_err */
4493  if (val64 & TXDMA_TDA_INT) {
4494  if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4495  TDA_SM0_ERR_ALARM |
4496  TDA_SM1_ERR_ALARM,
4497  &bar0->tda_err_reg,
4498  &sw_stat->tda_err_cnt))
4499  goto reset;
4500  do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4501  &bar0->tda_err_reg,
4502  &sw_stat->tda_err_cnt);
4503  }
4504  /* check for pcc_err */
4505  if (val64 & TXDMA_PCC_INT) {
4506  if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4507  PCC_N_SERR | PCC_6_COF_OV_ERR |
4508  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4509  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4510  PCC_TXB_ECC_DB_ERR,
4511  &bar0->pcc_err_reg,
4512  &sw_stat->pcc_err_cnt))
4513  goto reset;
4514  do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4515  &bar0->pcc_err_reg,
4516  &sw_stat->pcc_err_cnt);
4517  }
4518 
4519  /* check for tti_err */
4520  if (val64 & TXDMA_TTI_INT) {
4521  if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4522  &bar0->tti_err_reg,
4523  &sw_stat->tti_err_cnt))
4524  goto reset;
4525  do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4526  &bar0->tti_err_reg,
4527  &sw_stat->tti_err_cnt);
4528  }
4529 
4530  /* check for lso_err */
4531  if (val64 & TXDMA_LSO_INT) {
4532  if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4533  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4534  &bar0->lso_err_reg,
4535  &sw_stat->lso_err_cnt))
4536  goto reset;
4537  do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4538  &bar0->lso_err_reg,
4539  &sw_stat->lso_err_cnt);
4540  }
4541 
4542  /* check for tpa_err */
4543  if (val64 & TXDMA_TPA_INT) {
4544  if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4545  &bar0->tpa_err_reg,
4546  &sw_stat->tpa_err_cnt))
4547  goto reset;
4548  do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4549  &bar0->tpa_err_reg,
4550  &sw_stat->tpa_err_cnt);
4551  }
4552 
4553  /* check for sm_err */
4554  if (val64 & TXDMA_SM_INT) {
4555  if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4556  &bar0->sm_err_reg,
4557  &sw_stat->sm_err_cnt))
4558  goto reset;
4559  }
4560 
4561  val64 = readq(&bar0->mac_int_status);
4562  if (val64 & MAC_INT_STATUS_TMAC_INT) {
4563  if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4564  &bar0->mac_tmac_err_reg,
4565  &sw_stat->mac_tmac_err_cnt))
4566  goto reset;
4567  do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4568  TMAC_DESC_ECC_SG_ERR |
4569  TMAC_DESC_ECC_DB_ERR,
4570  &bar0->mac_tmac_err_reg,
4571  &sw_stat->mac_tmac_err_cnt);
4572  }
4573 
4574  val64 = readq(&bar0->xgxs_int_status);
4575  if (val64 & XGXS_INT_STATUS_TXGXS) {
4576  if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4577  &bar0->xgxs_txgxs_err_reg,
4578  &sw_stat->xgxs_txgxs_err_cnt))
4579  goto reset;
4580  do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4581  &bar0->xgxs_txgxs_err_reg,
4582  &sw_stat->xgxs_txgxs_err_cnt);
4583  }
4584 
4585  val64 = readq(&bar0->rxdma_int_status);
4586  if (val64 & RXDMA_INT_RC_INT_M) {
4587  if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4588  RC_FTC_ECC_DB_ERR |
4589  RC_PRCn_SM_ERR_ALARM |
4590  RC_FTC_SM_ERR_ALARM,
4591  &bar0->rc_err_reg,
4592  &sw_stat->rc_err_cnt))
4593  goto reset;
4594  do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4595  RC_FTC_ECC_SG_ERR |
4596  RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4597  &sw_stat->rc_err_cnt);
4598  if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4599  PRC_PCI_AB_WR_Rn |
4600  PRC_PCI_AB_F_WR_Rn,
4601  &bar0->prc_pcix_err_reg,
4602  &sw_stat->prc_pcix_err_cnt))
4603  goto reset;
4604  do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4605  PRC_PCI_DP_WR_Rn |
4606  PRC_PCI_DP_F_WR_Rn,
4607  &bar0->prc_pcix_err_reg,
4608  &sw_stat->prc_pcix_err_cnt);
4609  }
4610 
4611  if (val64 & RXDMA_INT_RPA_INT_M) {
4612  if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4613  &bar0->rpa_err_reg,
4614  &sw_stat->rpa_err_cnt))
4615  goto reset;
4616  do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4617  &bar0->rpa_err_reg,
4618  &sw_stat->rpa_err_cnt);
4619  }
4620 
4621  if (val64 & RXDMA_INT_RDA_INT_M) {
4622  if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4623  RDA_FRM_ECC_DB_N_AERR |
4624  RDA_SM1_ERR_ALARM |
4625  RDA_SM0_ERR_ALARM |
4626  RDA_RXD_ECC_DB_SERR,
4627  &bar0->rda_err_reg,
4628  &sw_stat->rda_err_cnt))
4629  goto reset;
4630  do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4631  RDA_FRM_ECC_SG_ERR |
4632  RDA_MISC_ERR |
4633  RDA_PCIX_ERR,
4634  &bar0->rda_err_reg,
4635  &sw_stat->rda_err_cnt);
4636  }
4637 
4638  if (val64 & RXDMA_INT_RTI_INT_M) {
4639  if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4640  &bar0->rti_err_reg,
4641  &sw_stat->rti_err_cnt))
4642  goto reset;
4643  do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4644  &bar0->rti_err_reg,
4645  &sw_stat->rti_err_cnt);
4646  }
4647 
4648  val64 = readq(&bar0->mac_int_status);
4649  if (val64 & MAC_INT_STATUS_RMAC_INT) {
4650  if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4651  &bar0->mac_rmac_err_reg,
4652  &sw_stat->mac_rmac_err_cnt))
4653  goto reset;
4654  do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4655  RMAC_SINGLE_ECC_ERR |
4656  RMAC_DOUBLE_ECC_ERR,
4657  &bar0->mac_rmac_err_reg,
4658  &sw_stat->mac_rmac_err_cnt);
4659  }
4660 
4661  val64 = readq(&bar0->xgxs_int_status);
4662  if (val64 & XGXS_INT_STATUS_RXGXS) {
4663  if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4664  &bar0->xgxs_rxgxs_err_reg,
4665  &sw_stat->xgxs_rxgxs_err_cnt))
4666  goto reset;
4667  }
4668 
4669  val64 = readq(&bar0->mc_int_status);
4670  if (val64 & MC_INT_STATUS_MC_INT) {
4671  if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4672  &bar0->mc_err_reg,
4673  &sw_stat->mc_err_cnt))
4674  goto reset;
4675 
4676  /* Handle ECC errors */
4677  val64 = readq(&bar0->mc_err_reg);
4678  writeq(val64, &bar0->mc_err_reg);
4679  if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4680  sw_stat->double_ecc_errs++;
4681  if (sp->device_type != XFRAME_II_DEVICE) {
4682  /*
4683  * Reset XframeI only if critical error
4684  */
4685  if (val64 &
4686  (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4687  MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4688  goto reset;
4689  }
4690  } else
4691  sw_stat->single_ecc_errs++;
4692  }
4693  }
4694  return;
4695 
4696 reset:
4697  s2io_stop_all_tx_queue(sp);
4698  schedule_work(&sp->rst_timer_task);
4699  sw_stat->soft_reset_cnt++;
4700 }
4701 
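4702 /**
4703  * s2io_isr - ISR handler of the device.
4704  * @irq: the irq of the device.
4705  * @dev_id: a void pointer to the net_device structure of the NIC.
4706  * Description:
4707  * This function is the ISR handler of the device. It identifies the
4708  * reason for the interrupt (Rx traffic, Tx completion, link change)
4709  * and calls the appropriate handlers.
4710  * Return value:
4711  * IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
4712  */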
4715 static irqreturn_t s2io_isr(int irq, void *dev_id)
4716 {
4717  struct net_device *dev = (struct net_device *)dev_id;
4718  struct s2io_nic *sp = netdev_priv(dev);
4719  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4720  int i;
4721  u64 reason = 0;
4722  struct mac_info *mac_control;
4723  struct config_param *config;
4724 
4725  /* Pretend we handled any irq's from a disconnected card */
4726  if (pci_channel_offline(sp->pdev))
4727  return IRQ_NONE;
4728 
4729  if (!is_s2io_card_up(sp))
4730  return IRQ_NONE;
4731 
4732  config = &sp->config;
4733  mac_control = &sp->mac_control;
4734 
4735  /*
4736  * Identify the cause for interrupt and call the appropriate
4737  * interrupt handler. Causes for the interrupt could be:
4738  * 1. Rx of packet.
4739  * 2. Tx complete.
4740  * 3. Link down.
4741  */
4742  reason = readq(&bar0->general_int_status);
4743 
4744  if (unlikely(reason == S2IO_MINUS_ONE))
4745  return IRQ_HANDLED; /* Nothing much can be done. Get out */
4746 
4747  if (reason &
4748  (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4749  writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4750 
4751  if (config->napi) {
4752  if (reason & GEN_INTR_RXTRAFFIC) {
4753  napi_schedule(&sp->napi);
4754  writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4755  writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4756  readl(&bar0->rx_traffic_int);
4757  }
4758  } else {
4759  /*
4760  * rx_traffic_int reg is an R1 register, writing all 1's
4761  * will ensure that the actual interrupt causing bit
4762  * gets cleared and hence a read can be avoided.
4763  */
4764  if (reason & GEN_INTR_RXTRAFFIC)
4765  writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4766 
4767  for (i = 0; i < config->rx_ring_num; i++) {
4768  struct ring_info *ring = &mac_control->rings[i];
4769 
4770  rx_intr_handler(ring, 0);
4771  }
4772  }
4773 
4774  /*
4775  * tx_traffic_int reg is an R1 register, writing all 1's
4776  * will ensure that the actual interrupt causing bit gets
4777  * cleared and hence a read can be avoided.
4778  */
4779  if (reason & GEN_INTR_TXTRAFFIC)
4780  writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4781 
4782  for (i = 0; i < config->tx_fifo_num; i++)
4783  tx_intr_handler(&mac_control->fifos[i]);
4784 
4785  if (reason & GEN_INTR_TXPIC)
4786  s2io_txpic_intr_handle(sp);
4787 
4788  /*
4789  * Reallocate the buffers from the interrupt handler itself.
4790  */
4791  if (!config->napi) {
4792  for (i = 0; i < config->rx_ring_num; i++) {
4793  struct ring_info *ring = &mac_control->rings[i];
4794 
4795  s2io_chk_rx_buffers(sp, ring);
4796  }
4797  }
4798  writeq(sp->general_int_mask, &bar0->general_int_mask);
4799  readl(&bar0->general_int_status);
4800 
4801  return IRQ_HANDLED;
4802 
4803  } else if (!reason) {
4804  /* The interrupt was not raised by us */
4805  return IRQ_NONE;
4806  }
4807 
4808  return IRQ_HANDLED;
4809 }
4810 
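4811 /* s2io_updt_stats - trigger an immediate DMA of the hardware
4812  * statistics block and poll briefly for its completion.
4813  */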
4814 static void s2io_updt_stats(struct s2io_nic *sp)
4815 {
4816  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4817  u64 val64;
4818  int cnt = 0;
4819 
4820  if (is_s2io_card_up(sp)) {
4821  /* Apprx 30us on a 133 MHz bus */
4822  val64 = SET_UPDT_CLICKS(10) |
4823  STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4824  writeq(val64, &bar0->stat_cfg);
4825  do {
4826  udelay(100);
4827  val64 = readq(&bar0->stat_cfg);
4828  if (!(val64 & s2BIT(0)))
4829  break;
4830  cnt++;
4831  if (cnt == 5)
4832  break; /* Updt failed */
4833  } while (1);
4834  }
4835 }
4836 
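4837 /**
4838  * s2io_get_stats - updates the device statistics structure.
4839  * @dev : pointer to the device structure.
4840  * Description:
4841  * This function updates the device statistics structure in the s2io_nic
4842  * structure from the hardware statistics block, tracking the delta since
4843  * the last update so that adapter resets do not zero the system counts.
4844  * Return value: pointer to the updated net_device_stats structure.
4845  */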
4846 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4847 {
4848  struct s2io_nic *sp = netdev_priv(dev);
4849  struct mac_info *mac_control = &sp->mac_control;
4850  struct stat_block *stats = mac_control->stats_info;
4851  u64 delta;
4852 
4853  /* Configure Stats for immediate updt */
4854  s2io_updt_stats(sp);
4855 
4856  /* A device reset will cause the on-adapter statistics to be zero'ed.
4857  * This can be done while running by changing the MTU. To prevent the
4858  * system from having the stats zero'ed, the driver keeps a copy of the
4859  * last update to the system (which is also zero'ed on reset). This
4860  * enables the driver to accurately know the delta between the last
4861  * update and the current update.
4862  */
4863  delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4864  le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4865  sp->stats.rx_packets += delta;
4866  dev->stats.rx_packets += delta;
4867 
4868  delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4869  le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4870  sp->stats.tx_packets += delta;
4871  dev->stats.tx_packets += delta;
4872 
4873  delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4874  le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4875  sp->stats.rx_bytes += delta;
4876  dev->stats.rx_bytes += delta;
4877 
4878  delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4879  le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4880  sp->stats.tx_bytes += delta;
4881  dev->stats.tx_bytes += delta;
4882 
4883  delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4884  sp->stats.rx_errors += delta;
4885  dev->stats.rx_errors += delta;
4886 
4887  delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4888  le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4889  sp->stats.tx_errors += delta;
4890  dev->stats.tx_errors += delta;
4891 
4892  delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4893  sp->stats.rx_dropped += delta;
4894  dev->stats.rx_dropped += delta;
4895 
4896  delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4897  sp->stats.tx_dropped += delta;
4898  dev->stats.tx_dropped += delta;
4899 
4900  /* The adapter MAC interprets pause frames as multicast packets, but
4901  * does not pass them up. This erroneously increases the multicast
4902  * packet count and needs to be deducted when the multicast frame count
4903  * is queried.
4904  */
4905  delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4906  le32_to_cpu(stats->rmac_vld_mcst_frms);
4907  delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4908  delta -= sp->stats.multicast;
4909  sp->stats.multicast += delta;
4910  dev->stats.multicast += delta;
4911 
4912  delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4913  le32_to_cpu(stats->rmac_usized_frms)) +
4914  le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4915  sp->stats.rx_length_errors += delta;
4916  dev->stats.rx_length_errors += delta;
4917 
4918  delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4919  sp->stats.rx_crc_errors += delta;
4920  dev->stats.rx_crc_errors += delta;
4921 
4922  return &dev->stats;
4923 }
4924 
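4925 /**
4926  * s2io_set_multicast - entry point for multicast address enable/disable.
4927  * @dev : pointer to the device structure
4928  * Description:
4929  * This function is a driver entry point which enables/disables reception
4930  * of all multicast frames, promiscuous mode, and programs the individual
4931  * multicast address filters into the CAM.
4932  */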
4938 static void s2io_set_multicast(struct net_device *dev)
4939 {
4940  int i, j, prev_cnt;
4941  struct netdev_hw_addr *ha;
4942  struct s2io_nic *sp = netdev_priv(dev);
4943  struct XENA_dev_config __iomem *bar0 = sp->bar0;
4944  u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4945  0xfeffffffffffULL;
4946  u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4947  void __iomem *add;
4948  struct config_param *config = &sp->config;
4949 
4950  if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4951  /* Enable all Multicast addresses */
4952  writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4953  &bar0->rmac_addr_data0_mem);
4954  writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4955  &bar0->rmac_addr_data1_mem);
4956  val64 = RMAC_ADDR_CMD_MEM_WE |
4957  RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4958  RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4959  writeq(val64, &bar0->rmac_addr_cmd_mem);
4960  /* Wait till command completes */
4961  wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4962  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4963  S2IO_BIT_RESET);
4964 
4965  sp->m_cast_flg = 1;
4966  sp->all_multi_pos = config->max_mc_addr - 1;
4967  } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4968  /* Disable all Multicast addresses */
4969  writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4970  &bar0->rmac_addr_data0_mem);
4971  writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4972  &bar0->rmac_addr_data1_mem);
4973  val64 = RMAC_ADDR_CMD_MEM_WE |
4974  RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4975  RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4976  writeq(val64, &bar0->rmac_addr_cmd_mem);
4977  /* Wait till command completes */
4978  wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4979  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4980  S2IO_BIT_RESET);
4981 
4982  sp->m_cast_flg = 0;
4983  sp->all_multi_pos = 0;
4984  }
4985 
4986  if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4987  /* Put the NIC into promiscuous mode */
4988  add = &bar0->mac_cfg;
4989  val64 = readq(&bar0->mac_cfg);
4990  val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4991 
4992  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4993  writel((u32)val64, add);
4994  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4995  writel((u32) (val64 >> 32), (add + 4));
4996 
4997  if (vlan_tag_strip != 1) {
4998  val64 = readq(&bar0->rx_pa_cfg);
4999  val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5000  writeq(val64, &bar0->rx_pa_cfg);
5001  sp->vlan_strip_flag = 0;
5002  }
5003 
5004  val64 = readq(&bar0->mac_cfg);
5005  sp->promisc_flg = 1;
5006  DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5007  dev->name);
5008  } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5009  /* Remove the NIC from promiscuous mode */
5010  add = &bar0->mac_cfg;
5011  val64 = readq(&bar0->mac_cfg);
5012  val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5013 
5014  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5015  writel((u32)val64, add);
5016  writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5017  writel((u32) (val64 >> 32), (add + 4));
5018 
5019  if (vlan_tag_strip != 0) {
5020  val64 = readq(&bar0->rx_pa_cfg);
5021  val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5022  writeq(val64, &bar0->rx_pa_cfg);
5023  sp->vlan_strip_flag = 1;
5024  }
5025 
5026  val64 = readq(&bar0->mac_cfg);
5027  sp->promisc_flg = 0;
5028  DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5029  }
5030 
5031  /* Update individual M_CAST address list */
5032  if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5033  if (netdev_mc_count(dev) >
5034  (config->max_mc_addr - config->max_mac_addr)) {
5035  DBG_PRINT(ERR_DBG,
5036  "%s: No more Rx filters can be added - "
5037  "please enable ALL_MULTI instead\n",
5038  dev->name);
5039  return;
5040  }
5041 
5042  prev_cnt = sp->mc_addr_count;
5043  sp->mc_addr_count = netdev_mc_count(dev);
5044 
5045  /* Clear out the previous list of Mc in the H/W. */
5046  for (i = 0; i < prev_cnt; i++) {
5047  writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5048  &bar0->rmac_addr_data0_mem);
5049  writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5050  &bar0->rmac_addr_data1_mem);
5051  val64 = RMAC_ADDR_CMD_MEM_WE |
5052  RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5053  RMAC_ADDR_CMD_MEM_OFFSET
5054  (config->mc_start_offset + i);
5055  writeq(val64, &bar0->rmac_addr_cmd_mem);
5056 
5057  /* Wait till command completes */
5058  if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5059  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5060  S2IO_BIT_RESET)) {
5061  DBG_PRINT(ERR_DBG,
5062  "%s: Adding Multicasts failed\n",
5063  dev->name);
5064  return;
5065  }
5066  }
5067 
5068  /* Create the new Rx filter list and update the same in H/W. */
5069  i = 0;
5070  netdev_for_each_mc_addr(ha, dev) {
5071  mac_addr = 0;
5072  for (j = 0; j < ETH_ALEN; j++) {
5073  mac_addr |= ha->addr[j];
5074  mac_addr <<= 8;
5075  }
5076  mac_addr >>= 8;
5077  writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5078  &bar0->rmac_addr_data0_mem);
5079  writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5080  &bar0->rmac_addr_data1_mem);
5081  val64 = RMAC_ADDR_CMD_MEM_WE |
5082  RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5083  RMAC_ADDR_CMD_MEM_OFFSET
5084  (i + config->mc_start_offset);
5085  writeq(val64, &bar0->rmac_addr_cmd_mem);
5086 
5087  /* Wait till command completes */
5088  if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5089  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5090  S2IO_BIT_RESET)) {
5091  DBG_PRINT(ERR_DBG,
5092  "%s: Adding Multicasts failed\n",
5093  dev->name);
5094  return;
5095  }
5096  i++;
5097  }
5098  }
5099 }
5100 
5101 /* read from CAM unicast & multicast addresses and store it in
5102  * def_mac_addr structure
5103  */
5104 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5105 {
5106  int offset;
5107  u64 mac_addr = 0x0;
5108  struct config_param *config = &sp->config;
5109 
5110  /* store unicast & multicast mac addresses */
5111  for (offset = 0; offset < config->max_mc_addr; offset++) {
5112  mac_addr = do_s2io_read_unicast_mc(sp, offset);
5113  /* if read fails disable the entry */
5114  if (mac_addr == FAILURE)
5115  mac_addr = S2IO_DISABLE_MAC_ENTRY;
5116  do_s2io_copy_mac_addr(sp, offset, mac_addr);
5117  }
5118 }
5119 
5120 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5121 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5122 {
5123  int offset;
5124  struct config_param *config = &sp->config;
5125  /* restore unicast mac address */
5126  for (offset = 0; offset < config->max_mac_addr; offset++)
5127  do_s2io_prog_unicast(sp->dev,
5128  sp->def_mac_addr[offset].mac_addr);
5129 
5130  /* restore multicast mac address */
5131  for (offset = config->mc_start_offset;
5132  offset < config->max_mc_addr; offset++)
5133  do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5134 }
5135 
5136 /* add a multicast MAC address to CAM */
5137 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5138 {
5139  int i;
5140  u64 mac_addr = 0;
5141  struct config_param *config = &sp->config;
5142 
5143  for (i = 0; i < ETH_ALEN; i++) {
5144  mac_addr <<= 8;
5145  mac_addr |= addr[i];
5146  }
5147  if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5148  return SUCCESS;
5149 
5150  /* check if the multicast mac is already present in CAM */
5151  for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5152  u64 tmp64;
5153  tmp64 = do_s2io_read_unicast_mc(sp, i);
5154  if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5155  break;
5156 
5157  if (tmp64 == mac_addr)
5158  return SUCCESS;
5159  }
5160  if (i == config->max_mc_addr) {
5161  DBG_PRINT(ERR_DBG,
5162  "CAM full no space left for multicast MAC\n");
5163  return FAILURE;
5164  }
5165  /* Update the internal structure with this new mac address */
5166  do_s2io_copy_mac_addr(sp, i, mac_addr);
5167 
5168  return do_s2io_add_mac(sp, mac_addr, i);
5169 }
5170 
5171 /* add MAC address to CAM */
5172 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5173 {
5174  u64 val64;
5175  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5176 
5177  writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5178  &bar0->rmac_addr_data0_mem);
5179 
5180  val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5181  RMAC_ADDR_CMD_MEM_OFFSET(off);
5182  writeq(val64, &bar0->rmac_addr_cmd_mem);
5183 
5184  /* Wait till command completes */
5185  if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5186  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5187  S2IO_BIT_RESET)) {
5188  DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5189  return FAILURE;
5190  }
5191  return SUCCESS;
5192 }
5193 /* deletes a specified unicast/multicast mac entry from CAM */
5194 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5195 {
5196  int offset;
5197  u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5198  struct config_param *config = &sp->config;
5199 
5200  for (offset = 1;
5201  offset < config->max_mc_addr; offset++) {
5202  tmp64 = do_s2io_read_unicast_mc(sp, offset);
5203  if (tmp64 == addr) {
5204  /* disable the entry by writing 0xffffffffffffULL */
5205  if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5206  return FAILURE;
5207  /* store the new mac list from CAM */
5208  do_s2io_store_unicast_mc(sp);
5209  return SUCCESS;
5210  }
5211  }
5212  DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5213  (unsigned long long)addr);
5214  return FAILURE;
5215 }
5216 
5217 /* read mac entries from CAM */
5218 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5219 {
5220  u64 tmp64 = 0xffffffffffff0000ULL, val64;
5221  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5222 
5223  /* read mac addr */
5224  val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5225  RMAC_ADDR_CMD_MEM_OFFSET(offset);
5226  writeq(val64, &bar0->rmac_addr_cmd_mem);
5227 
5228  /* Wait till command completes */
5229  if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5230  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5231  S2IO_BIT_RESET)) {
5232  DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5233  return FAILURE;
5234  }
5235  tmp64 = readq(&bar0->rmac_addr_data0_mem);
5236 
5237  return tmp64 >> 16;
5238 }
5239 
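5240 /*
5241  * s2io_set_mac_addr - driver entry point to change the MAC address.
5242  * Validates the address and programs it into the CAM at offset 0.
5243  */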
5244 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5245 {
5246  struct sockaddr *addr = p;
5247 
5248  if (!is_valid_ether_addr(addr->sa_data))
5249  return -EADDRNOTAVAIL;
5250 
5251  memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5252 
5253  /* store the MAC address in CAM */
5254  return do_s2io_prog_unicast(dev, dev->dev_addr);
5255 }
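5256 /**
5257  * do_s2io_prog_unicast - program a unicast MAC address into the CAM
5258  * @dev : pointer to the device structure.
5259  * @addr: a u8 pointer to the new MAC address to be programmed.
5260  * Description : Sets the given address as the new unicast filter,
5261  * skipping the update when the address is already present in the CAM.
5262  * Return value: SUCCESS on success and FAILURE on failure.
5263  */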
5266 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5267 {
5268  struct s2io_nic *sp = netdev_priv(dev);
5269  register u64 mac_addr = 0, perm_addr = 0;
5270  int i;
5271  u64 tmp64;
5272  struct config_param *config = &sp->config;
5273 
5274  /*
5275  * Set the new MAC address as the new unicast filter and reflect this
5276  * change on the device address registered with the OS. It will be
5277  * at offset 0.
5278  */
5279  for (i = 0; i < ETH_ALEN; i++) {
5280  mac_addr <<= 8;
5281  mac_addr |= addr[i];
5282  perm_addr <<= 8;
5283  perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5284  }
5285 
5286  /* check if the dev_addr is different than perm_addr */
5287  if (mac_addr == perm_addr)
5288  return SUCCESS;
5289 
5290  /* check if the mac is already present in CAM */
5291  for (i = 1; i < config->max_mac_addr; i++) {
5292  tmp64 = do_s2io_read_unicast_mc(sp, i);
5293  if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5294  break;
5295 
5296  if (tmp64 == mac_addr) {
5297  DBG_PRINT(INFO_DBG,
5298  "MAC addr:0x%llx already present in CAM\n",
5299  (unsigned long long)mac_addr);
5300  return SUCCESS;
5301  }
5302  }
5303  if (i == config->max_mac_addr) {
5304  DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5305  return FAILURE;
5306  }
5307  /* Update the internal structure with this new mac address */
5308  do_s2io_copy_mac_addr(sp, i, mac_addr);
5309 
5310  return do_s2io_add_mac(sp, mac_addr, i);
5311 }
5312 
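5313 /**
5314  * s2io_ethtool_sset - Sets different link parameters.
5315  * @dev : pointer to netdev
5316  * @info: pointer to the structure with parameters given by ethtool to
5317  * set link information.
5318  * Description:
5319  * Sets the link parameters provided by the user onto the NIC. As the
5320  * NIC is fixed at 10 Gbps full duplex without autonegotiation, any
5321  * other setting is rejected.
5322  * Return value: 0 on success, -EINVAL otherwise.
5323  */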
5325 static int s2io_ethtool_sset(struct net_device *dev,
5326  struct ethtool_cmd *info)
5327 {
5328  struct s2io_nic *sp = netdev_priv(dev);
5329  if ((info->autoneg == AUTONEG_ENABLE) ||
5330  (ethtool_cmd_speed(info) != SPEED_10000) ||
5331  (info->duplex != DUPLEX_FULL))
5332  return -EINVAL;
5333  else {
5334  s2io_close(sp->dev);
5335  s2io_open(sp->dev);
5336  }
5337 
5338  return 0;
5339 }
5340 
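5341 /**
5342  * s2io_ethtool_gset - Returns the link settings.
5343  * @dev : pointer to netdev
5344  * @info : pointer to the structure with link parameters returned to
5345  * ethtool.
5346  * Description:
5347  * Returns the link parameters: always fibre, 10 Gbps full duplex when
5348  * the carrier is up, and autonegotiation disabled.
5349  * Return value: 0 on success.
5350  */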
5353 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5354 {
5355  struct s2io_nic *sp = netdev_priv(dev);
5356  info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5357  info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5358  info->port = PORT_FIBRE;
5359 
5360  /* info->transceiver */
5361  info->transceiver = XCVR_EXTERNAL;
5362 
5363  if (netif_carrier_ok(sp->dev)) {
5364  ethtool_cmd_speed_set(info, SPEED_10000);
5365  info->duplex = DUPLEX_FULL;
5366  } else {
5367  ethtool_cmd_speed_set(info, -1);
5368  info->duplex = -1;
5369  }
5370 
5371  info->autoneg = AUTONEG_DISABLE;
5372  return 0;
5373 }
5374 
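5375 /**
5376  * s2io_ethtool_gdrvinfo - Returns driver-specific information.
5377  * @dev : pointer to netdev
5378  * @info : pointer to the structure with information returned to ethtool.
5379  * Description:
5380  * Fills the driver name, version, bus information and the register and
5381  * EEPROM dump lengths into the ethtool_drvinfo structure.
5382  */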
5387 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5388  struct ethtool_drvinfo *info)
5389 {
5390  struct s2io_nic *sp = netdev_priv(dev);
5391 
5392  strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5393  strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5394  strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5395  info->regdump_len = XENA_REG_SPACE;
5396  info->eedump_len = XENA_EEPROM_SPACE;
5397 }
5398 
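5399 /**
5400  * s2io_ethtool_gregs - dumps the entire register space of the adapter.
5401  * @dev : pointer to netdev
5402  * @regs : pointer to the structure with parameters given by ethtool.
5403  * @space : user-provided buffer that the register space is dumped into.
5404  * Description:
5405  * Reads the adapter's register space in 8-byte chunks and copies it
5406  * into the buffer provided by ethtool (ethtool -d).
5407  */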
5413 static void s2io_ethtool_gregs(struct net_device *dev,
5414  struct ethtool_regs *regs, void *space)
5415 {
5416  int i;
5417  u64 reg;
5418  u8 *reg_space = (u8 *)space;
5419  struct s2io_nic *sp = netdev_priv(dev);
5420 
5421  regs->len = XENA_REG_SPACE;
5422  regs->version = sp->pdev->subsystem_device;
5423 
5424  for (i = 0; i < regs->len; i += 8) {
5425  reg = readq(sp->bar0 + i);
5426  memcpy((reg_space + i), &reg, 8);
5427  }
5428 }
5429 
5430 /*
5431  * s2io_set_led - control NIC led
5432  */
5433 static void s2io_set_led(struct s2io_nic *sp, bool on)
5434 {
5435  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5436  u16 subid = sp->pdev->subsystem_device;
5437  u64 val64;
5438 
5439  if ((sp->device_type == XFRAME_II_DEVICE) ||
5440  ((subid & 0xFF) >= 0x07)) {
5441  val64 = readq(&bar0->gpio_control);
5442  if (on)
5443  val64 |= GPIO_CTRL_GPIO_0;
5444  else
5445  val64 &= ~GPIO_CTRL_GPIO_0;
5446 
5447  writeq(val64, &bar0->gpio_control);
5448  } else {
5449  val64 = readq(&bar0->adapter_control);
5450  if (on)
5451  val64 |= ADAPTER_LED_ON;
5452  else
5453  val64 &= ~ADAPTER_LED_ON;
5454 
5455  writeq(val64, &bar0->adapter_control);
5456  }
5457 
5458 }
5459 
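5460 /**
5461  * s2io_ethtool_set_led - blink the LED to physically identify the NIC.
5462  * @dev : pointer to netdev
5463  * @state : requested LED state (active/on/off/inactive).
5464  * Description:
5465  * Used by ethtool -p to locate the adapter; toggles the adapter LED
5466  * once per second while identification is active.
5467  */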
5472 static int s2io_ethtool_set_led(struct net_device *dev,
5473  enum ethtool_phys_id_state state)
5474 {
5475  struct s2io_nic *sp = netdev_priv(dev);
5476  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5477  u16 subid = sp->pdev->subsystem_device;
5478 
5479  if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5480  u64 val64 = readq(&bar0->adapter_control);
5481  if (!(val64 & ADAPTER_CNTL_EN)) {
5482  pr_err("Adapter Link down, cannot blink LED\n");
5483  return -EAGAIN;
5484  }
5485  }
5486 
5487  switch (state) {
5488  case ETHTOOL_ID_ACTIVE:
5489  sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5490  return 1; /* cycle on/off once per second */
5491 
5492  case ETHTOOL_ID_ON:
5493  s2io_set_led(sp, true);
5494  break;
5495 
5496  case ETHTOOL_ID_OFF:
5497  s2io_set_led(sp, false);
5498  break;
5499 
5500  case ETHTOOL_ID_INACTIVE:
5501  if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5502  writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5503  }
5504 
5505  return 0;
5506 }
5507 
5508 static void s2io_ethtool_gringparam(struct net_device *dev,
5509  struct ethtool_ringparam *ering)
5510 {
5511  struct s2io_nic *sp = netdev_priv(dev);
5512  int i, tx_desc_count = 0, rx_desc_count = 0;
5513 
5514  if (sp->rxd_mode == RXD_MODE_1) {
5515  ering->rx_max_pending = MAX_RX_DESC_1;
5516  ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5517  } else {
5518  ering->rx_max_pending = MAX_RX_DESC_2;
5519  ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5520  }
5521 
5522  ering->tx_max_pending = MAX_TX_DESC;
5523 
5524  for (i = 0; i < sp->config.rx_ring_num; i++)
5525  rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5526  ering->rx_pending = rx_desc_count;
5527  ering->rx_jumbo_pending = rx_desc_count;
5528 
5529  for (i = 0; i < sp->config.tx_fifo_num; i++)
5530  tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5531  ering->tx_pending = tx_desc_count;
5532  DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5533 }
5534 
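5535 /**
5536  * s2io_ethtool_getpause_data - pause frame generation and reception.
5537  * @dev : pointer to netdev
5538  * @ep : pointer to the structure with pause parameters given by ethtool.
5539  * Description:
5540  * Returns whether Tx and Rx flow control (pause frames) are enabled,
5541  * as read from the RMAC pause configuration register.
5542  */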
5545 static void s2io_ethtool_getpause_data(struct net_device *dev,
5546  struct ethtool_pauseparam *ep)
5547 {
5548  u64 val64;
5549  struct s2io_nic *sp = netdev_priv(dev);
5550  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5551 
5552  val64 = readq(&bar0->rmac_pause_cfg);
5553  if (val64 & RMAC_PAUSE_GEN_ENABLE)
5554  ep->tx_pause = true;
5555  if (val64 & RMAC_PAUSE_RX_ENABLE)
5556  ep->rx_pause = true;
5557  ep->autoneg = false;
5558 }
5559 
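5560 /**
5561  * s2io_ethtool_setpause_data - set the pause parameters.
5562  * @dev : pointer to netdev
5563  * @ep : pointer to the structure with pause parameters given by ethtool.
5564  * Description:
5565  * Enables or disables generation and reception of pause frames in the
5566  * RMAC pause configuration register.
5567  * Return value: 0 on success.
5568  */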
5572 static int s2io_ethtool_setpause_data(struct net_device *dev,
5573  struct ethtool_pauseparam *ep)
5574 {
5575  u64 val64;
5576  struct s2io_nic *sp = netdev_priv(dev);
5577  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5578 
5579  val64 = readq(&bar0->rmac_pause_cfg);
5580  if (ep->tx_pause)
5581  val64 |= RMAC_PAUSE_GEN_ENABLE;
5582  else
5583  val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5584  if (ep->rx_pause)
5585  val64 |= RMAC_PAUSE_RX_ENABLE;
5586  else
5587  val64 &= ~RMAC_PAUSE_RX_ENABLE;
5588  writeq(val64, &bar0->rmac_pause_cfg);
5589  return 0;
5590 }
5591 
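5592 /**
5593  * read_eeprom - reads 4 bytes of data from the user-given offset.
5594  * @sp : private member of the device structure.
5595  * @off : offset at which the data must be read.
5596  * @data : output parameter that holds the data read.
5597  * Description:
5598  * Reads 4 bytes of data from the EEPROM: over the I2C bus on Xframe I,
5599  * and over the SPI interface on Xframe II, polling for completion.
5600  * Return value: 0 on success, -1 on failure.
5601  */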
5608 #define S2IO_DEV_ID 5
5609 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5610 {
5611  int ret = -1;
5612  u32 exit_cnt = 0;
5613  u64 val64;
5614  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5615 
5616  if (sp->device_type == XFRAME_I_DEVICE) {
5617  val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5618  I2C_CONTROL_ADDR(off) |
5619  I2C_CONTROL_BYTE_CNT(0x3) |
5620  I2C_CONTROL_READ |
5621  I2C_CONTROL_CNTL_START;
5622  SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5623 
5624  while (exit_cnt < 5) {
5625  val64 = readq(&bar0->i2c_control);
5626  if (I2C_CONTROL_CNTL_END(val64)) {
5627  *data = I2C_CONTROL_GET_DATA(val64);
5628  ret = 0;
5629  break;
5630  }
5631  msleep(50);
5632  exit_cnt++;
5633  }
5634  }
5635 
5636  if (sp->device_type == XFRAME_II_DEVICE) {
5637  val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5638  SPI_CONTROL_BYTECNT(0x3) |
5639  SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5640  SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5641  val64 |= SPI_CONTROL_REQ;
5642  SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5643  while (exit_cnt < 5) {
5644  val64 = readq(&bar0->spi_control);
5645  if (val64 & SPI_CONTROL_NACK) {
5646  ret = 1;
5647  break;
5648  } else if (val64 & SPI_CONTROL_DONE) {
5649  *data = readq(&bar0->spi_data);
5650  *data &= 0xffffff;
5651  ret = 0;
5652  break;
5653  }
5654  msleep(50);
5655  exit_cnt++;
5656  }
5657  }
5658  return ret;
5659 }
5660 
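5661 /**
5662  * write_eeprom - writes the data into the given offset of the EEPROM.
5663  * @sp : private member of the device structure.
5664  * @off : offset at which the data must be written.
5665  * @data : the data to be written.
5666  * @cnt : number of bytes of data to be written.
5667  * Description:
5668  * Writes cnt bytes to the EEPROM: over the I2C bus on Xframe I, and
5669  * over the SPI interface on Xframe II, polling for completion or NACK.
5670  * Return value: 0 on success, non-zero on failure.
5671  */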
5676 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5677 {
5678  int exit_cnt = 0, ret = -1;
5679  u64 val64;
5680  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5681 
5682  if (sp->device_type == XFRAME_I_DEVICE) {
5683  val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5684  I2C_CONTROL_ADDR(off) |
5685  I2C_CONTROL_BYTE_CNT(cnt) |
5686  I2C_CONTROL_SET_DATA((u32)data) |
5687  I2C_CONTROL_CNTL_START;
5688  SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5689 
5690  while (exit_cnt < 5) {
5691  val64 = readq(&bar0->i2c_control);
5692  if (I2C_CONTROL_CNTL_END(val64)) {
5693  if (!(val64 & I2C_CONTROL_NACK))
5694  ret = 0;
5695  break;
5696  }
5697  msleep(50);
5698  exit_cnt++;
5699  }
5700  }
5701 
5702  if (sp->device_type == XFRAME_II_DEVICE) {
5703  int write_cnt = (cnt == 8) ? 0 : cnt;
5704  writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5705 
5706  val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5707  SPI_CONTROL_BYTECNT(write_cnt) |
5708  SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5709  SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5710  val64 |= SPI_CONTROL_REQ;
5711  SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5712  while (exit_cnt < 5) {
5713  val64 = readq(&bar0->spi_control);
5714  if (val64 & SPI_CONTROL_NACK) {
5715  ret = 1;
5716  break;
5717  } else if (val64 & SPI_CONTROL_DONE) {
5718  ret = 0;
5719  break;
5720  }
5721  msleep(50);
5722  exit_cnt++;
5723  }
5724  }
5725  return ret;
5726 }
5727 static void s2io_vpd_read(struct s2io_nic *nic)
5728 {
5729  u8 *vpd_data;
5730  u8 data;
5731  int i = 0, cnt, len, fail = 0;
5732  int vpd_addr = 0x80;
5733  struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5734 
5735  if (nic->device_type == XFRAME_II_DEVICE) {
5736  strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5737  vpd_addr = 0x80;
5738  } else {
5739  strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5740  vpd_addr = 0x50;
5741  }
5742  strcpy(nic->serial_num, "NOT AVAILABLE");
5743 
5744  vpd_data = kmalloc(256, GFP_KERNEL);
5745  if (!vpd_data) {
5746  swstats->mem_alloc_fail_cnt++;
5747  return;
5748  }
5749  swstats->mem_allocated += 256;
5750 
5751  for (i = 0; i < 256; i += 4) {
5752  pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5753  pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5754  pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5755  for (cnt = 0; cnt < 5; cnt++) {
5756  msleep(2);
5757  pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5758  if (data == 0x80)
5759  break;
5760  }
5761  if (cnt >= 5) {
5762  DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5763  fail = 1;
5764  break;
5765  }
5766  pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5767  (u32 *)&vpd_data[i]);
5768  }
5769 
5770  if (!fail) {
5771  /* read serial number of adapter */
5772  for (cnt = 0; cnt < 252; cnt++) {
5773  if ((vpd_data[cnt] == 'S') &&
5774  (vpd_data[cnt+1] == 'N')) {
5775  len = vpd_data[cnt+2];
5776  if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5777  memcpy(nic->serial_num,
5778  &vpd_data[cnt + 3],
5779  len);
5780  memset(nic->serial_num+len,
5781  0,
5782  VPD_STRING_LEN-len);
5783  break;
5784  }
5785  }
5786  }
5787  }
5788 
5789  if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5790  len = vpd_data[1];
5791  memcpy(nic->product_name, &vpd_data[3], len);
5792  nic->product_name[len] = 0;
5793  }
5794  kfree(vpd_data);
5795  swstats->mem_freed += 256;
5796 }
5797 
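5798 /**
5799  * s2io_ethtool_geeprom - reads the EEPROM contents for ethtool.
5800  * @dev : pointer to netdev
5801  * @eeprom : pointer to the user-level structure provided by ethtool,
5802  * containing all relevant information.
5803  * @data_buf : buffer that the EEPROM contents are read into.
5804  * Description:
5805  * Reads the requested range of the EEPROM 4 bytes at a time into the
5806  * buffer supplied by ethtool (ethtool -e).
5807  * Return value: 0 on success, -EFAULT on read failure.
5808  */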
5811 static int s2io_ethtool_geeprom(struct net_device *dev,
5812  struct ethtool_eeprom *eeprom, u8 * data_buf)
5813 {
5814  u32 i, valid;
5815  u64 data;
5816  struct s2io_nic *sp = netdev_priv(dev);
5817 
5818  eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5819 
5820  if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5821  eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5822 
5823  for (i = 0; i < eeprom->len; i += 4) {
5824  if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5825  DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5826  return -EFAULT;
5827  }
5828  valid = INV(data);
5829  memcpy((data_buf + i), &valid, 4);
5830  }
5831  return 0;
5832 }
5833 
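5834 /**
5835  * s2io_ethtool_seeprom - writes the user-given data into the EEPROM.
5836  * @dev : pointer to netdev
5837  * @eeprom : pointer to the user structure provided by ethtool, which
5838  * also carries the magic number used to validate the request.
5839  * @data_buf : user-defined value to be written into the EEPROM.
5840  * Description:
5841  * Validates the magic value and writes the data one byte at a time
5842  * into the EEPROM (ethtool -E).
5843  * Return value: 0 on success, -EFAULT on failure.
5844  */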
5848 static int s2io_ethtool_seeprom(struct net_device *dev,
5849  struct ethtool_eeprom *eeprom,
5850  u8 *data_buf)
5851 {
5852  int len = eeprom->len, cnt = 0;
5853  u64 valid = 0, data;
5854  struct s2io_nic *sp = netdev_priv(dev);
5855 
5856  if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5857  DBG_PRINT(ERR_DBG,
5858  "ETHTOOL_WRITE_EEPROM Err: "
5859  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5860  (sp->pdev->vendor | (sp->pdev->device << 16)),
5861  eeprom->magic);
5862  return -EFAULT;
5863  }
5864 
5865  while (len) {
5866  data = (u32)data_buf[cnt] & 0x000000FF;
5867  if (data)
5868  valid = (u32)(data << 24);
5869  else
5870  valid = data;
5871 
5872  if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5873  DBG_PRINT(ERR_DBG,
5874  "ETHTOOL_WRITE_EEPROM Err: "
5875  "Cannot write into the specified offset\n");
5876  return -EFAULT;
5877  }
5878  cnt++;
5879  len--;
5880  }
5881 
5882  return 0;
5883 }
5884 
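5885 /**
5886  * s2io_register_test - reads and writes into all clock domains.
5887  * @sp : private member of the device structure.
5888  * @data : variable that returns the result of each test conducted.
5889  * Description:
5890  * Reads registers with known reset values in all the clock domains of
5891  * the adapter and performs a write/read-back test on the MSI data
5892  * register to verify register accessibility.
5893  * Return value: 0 if all tests pass, 1 otherwise.
5894  */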
5898 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5899 {
5900  struct XENA_dev_config __iomem *bar0 = sp->bar0;
5901  u64 val64 = 0, exp_val;
5902  int fail = 0;
5903 
5904  val64 = readq(&bar0->pif_rd_swapper_fb);
5905  if (val64 != 0x123456789abcdefULL) {
5906  fail = 1;
5907  DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5908  }
5909 
5910  val64 = readq(&bar0->rmac_pause_cfg);
5911  if (val64 != 0xc000ffff00000000ULL) {
5912  fail = 1;
5913  DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5914  }
5915 
5916  val64 = readq(&bar0->rx_queue_cfg);
5917  if (sp->device_type == XFRAME_II_DEVICE)
5918  exp_val = 0x0404040404040404ULL;
5919  else
5920  exp_val = 0x0808080808080808ULL;
5921  if (val64 != exp_val) {
5922  fail = 1;
5923  DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5924  }
5925 
5926  val64 = readq(&bar0->xgxs_efifo_cfg);
5927  if (val64 != 0x000000001923141EULL) {
5928  fail = 1;
5929  DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5930  }
5931 
5932  val64 = 0x5A5A5A5A5A5A5A5AULL;
5933  writeq(val64, &bar0->xmsi_data);
5934  val64 = readq(&bar0->xmsi_data);
5935  if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5936  fail = 1;
5937  DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5938  }
5939 
5940  val64 = 0xA5A5A5A5A5A5A5A5ULL;
5941  writeq(val64, &bar0->xmsi_data);
5942  val64 = readq(&bar0->xmsi_data);
5943  if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5944  fail = 1;
5945  DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5946  }
5947 
5948  *data = fail;
5949  return fail;
5950 }
5951 
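5952 /**
5953  * s2io_eeprom_test - verifies the EEPROM by writing and reading back.
5954  * @sp : private member of the device structure.
5955  * @data : variable that returns the result of each test conducted.
5956  * Description:
5957  * Verifies read/write access to the EEPROM, including negative tests
5958  * on write-protected regions of Xframe I, restoring the original
5959  * contents afterwards.
5960  * Return value: 0 on success, 1 on failure.
5961  */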
5965 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5966 {
5967  int fail = 0;
5968  u64 ret_data, org_4F0, org_7F0;
5969  u8 saved_4F0 = 0, saved_7F0 = 0;
5970  struct net_device *dev = sp->dev;
5971 
5972  /* Test Write Error at offset 0 */
5973  /* Note that the SPI interface allows write access to all areas
5974  * of the EEPROM, hence negative testing is done only for Xframe I.
5975  */
5976  if (sp->device_type == XFRAME_I_DEVICE)
5977  if (!write_eeprom(sp, 0, 0, 3))
5978  fail = 1;
5979 
5980  /* Save current values at offsets 0x4F0 and 0x7F0 */
5981  if (!read_eeprom(sp, 0x4F0, &org_4F0))
5982  saved_4F0 = 1;
5983  if (!read_eeprom(sp, 0x7F0, &org_7F0))
5984  saved_7F0 = 1;
5985 
5986  /* Test Write at offset 4f0 */
5987  if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5988  fail = 1;
5989  if (read_eeprom(sp, 0x4F0, &ret_data))
5990  fail = 1;
5991 
5992  if (ret_data != 0x012345) {
5993  DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5994  "Data written %llx Data read %llx\n",
5995  dev->name, (unsigned long long)0x12345,
5996  (unsigned long long)ret_data);
5997  fail = 1;
5998  }
5999 
6000  /* Reset the EEPROM data back to 0xFFFFFF */
6001  write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6002 
6003  /* Test Write Request Error at offset 0x7c */
6004  if (sp->device_type == XFRAME_I_DEVICE)
6005  if (!write_eeprom(sp, 0x07C, 0, 3))
6006  fail = 1;
6007 
6008  /* Test Write Request at offset 0x7f0 */
6009  if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6010  fail = 1;
6011  if (read_eeprom(sp, 0x7F0, &ret_data))
6012  fail = 1;
6013 
6014  if (ret_data != 0x012345) {
6015  DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6016  "Data written %llx Data read %llx\n",
6017  dev->name, (unsigned long long)0x12345,
6018  (unsigned long long)ret_data);
6019  fail = 1;
6020  }
6021 
6022  /* Reset the EEPROM data back to 0xFFFFFF */
6023  write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6024 
6025  if (sp->device_type == XFRAME_I_DEVICE) {
6026  /* Test Write Error at offset 0x80 */
6027  if (!write_eeprom(sp, 0x080, 0, 3))
6028  fail = 1;
6029 
6030  /* Test Write Error at offset 0xfc */
6031  if (!write_eeprom(sp, 0x0FC, 0, 3))
6032  fail = 1;
6033 
6034  /* Test Write Error at offset 0x100 */
6035  if (!write_eeprom(sp, 0x100, 0, 3))
6036  fail = 1;
6037 
6038  /* Test Write Error at offset 4ec */
6039  if (!write_eeprom(sp, 0x4EC, 0, 3))
6040  fail = 1;
6041  }
6042 
6043  /* Restore values at offsets 0x4F0 and 0x7F0 */
6044  if (saved_4F0)
6045  write_eeprom(sp, 0x4F0, org_4F0, 3);
6046  if (saved_7F0)
6047  write_eeprom(sp, 0x7F0, org_7F0, 3);
6048 
6049  *data = fail;
6050  return fail;
6051 }
6052 
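6053 /**
6054  * s2io_bist_test - invokes the PCI built-in self test of the card.
6055  * @sp : private member of the device structure.
6056  * @data : variable that returns the result of the test.
6057  * Description:
6058  * Starts BIST through the PCI configuration space and polls for up to
6059  * two seconds for the completion code.
6060  * Return value: 0 on success, -1 on timeout.
6061  */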
6067 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6068 {
6069  u8 bist = 0;
6070  int cnt = 0, ret = -1;
6071 
6072  pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6073  bist |= PCI_BIST_START;
6074  pci_write_config_word(sp->pdev, PCI_BIST, bist);
6075 
6076  while (cnt < 20) {
6077  pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6078  if (!(bist & PCI_BIST_START)) {
6079  *data = (bist & PCI_BIST_CODE_MASK);
6080  ret = 0;
6081  break;
6082  }
6083  msleep(100);
6084  cnt++;
6085  }
6086 
6087  return ret;
6088 }
6089 
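6090 /**
6091  * s2io_link_test - verifies the link state of the NIC.
6092  * @sp : private member of the device structure.
6093  * @data : variable that returns the result of the test.
6094  * Description:
6095  * Reads the adapter status register to check whether the link is up.
6096  * Return value: 0 if the link is up, 1 otherwise.
6097  */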
6103 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6104 {
6105  struct XENA_dev_config __iomem *bar0 = sp->bar0;
6106  u64 val64;
6107 
6108  val64 = readq(&bar0->adapter_status);
6109  if (!(LINK_IS_UP(val64)))
6110  *data = 1;
6111  else
6112  *data = 0;
6113 
6114  return *data;
6115 }
6116 
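6117 /**
6118  * s2io_rldram_test - conducts a read/write test of the RLDRAM.
6119  * @sp : private member of the device structure.
6120  * @data : variable that returns the result of the test.
6121  * Description:
6122  * Puts the memory controller into test mode, writes known patterns to
6123  * the RLDRAM and reads them back, over two inverted iterations.
6124  * Return value: 0 on success, 1 on failure.
6125  */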
6130 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6131 {
6132  struct XENA_dev_config __iomem *bar0 = sp->bar0;
6133  u64 val64;
6134  int cnt, iteration = 0, test_fail = 0;
6135 
6136  val64 = readq(&bar0->adapter_control);
6137  val64 &= ~ADAPTER_ECC_EN;
6138  writeq(val64, &bar0->adapter_control);
6139 
6140  val64 = readq(&bar0->mc_rldram_test_ctrl);
6141  val64 |= MC_RLDRAM_TEST_MODE;
6142  SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6143 
6144  val64 = readq(&bar0->mc_rldram_mrs);
6145  val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6146  SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6147 
6148  val64 |= MC_RLDRAM_MRS_ENABLE;
6149  SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6150 
6151  while (iteration < 2) {
6152  val64 = 0x55555555aaaa0000ULL;
6153  if (iteration == 1)
6154  val64 ^= 0xFFFFFFFFFFFF0000ULL;
6155  writeq(val64, &bar0->mc_rldram_test_d0);
6156 
6157  val64 = 0xaaaa5a5555550000ULL;
6158  if (iteration == 1)
6159  val64 ^= 0xFFFFFFFFFFFF0000ULL;
6160  writeq(val64, &bar0->mc_rldram_test_d1);
6161 
6162  val64 = 0x55aaaaaaaa5a0000ULL;
6163  if (iteration == 1)
6164  val64 ^= 0xFFFFFFFFFFFF0000ULL;
6165  writeq(val64, &bar0->mc_rldram_test_d2);
6166 
6167  val64 = (u64) (0x0000003ffffe0100ULL);
6168  writeq(val64, &bar0->mc_rldram_test_add);
6169 
6170  val64 = MC_RLDRAM_TEST_MODE |
6171  MC_RLDRAM_TEST_WRITE |
6172  MC_RLDRAM_TEST_GO;
6173  SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6174 
6175  for (cnt = 0; cnt < 5; cnt++) {
6176  val64 = readq(&bar0->mc_rldram_test_ctrl);
6177  if (val64 & MC_RLDRAM_TEST_DONE)
6178  break;
6179  msleep(200);
6180  }
6181 
6182  if (cnt == 5)
6183  break;
6184 
6185  val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6186  SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6187 
6188  for (cnt = 0; cnt < 5; cnt++) {
6189  val64 = readq(&bar0->mc_rldram_test_ctrl);
6190  if (val64 & MC_RLDRAM_TEST_DONE)
6191  break;
6192  msleep(500);
6193  }
6194 
6195  if (cnt == 5)
6196  break;
6197 
6198  val64 = readq(&bar0->mc_rldram_test_ctrl);
6199  if (!(val64 & MC_RLDRAM_TEST_PASS))
6200  test_fail = 1;
6201 
6202  iteration++;
6203  }
6204 
6205  *data = test_fail;
6206 
6207  /* Bring the adapter out of test mode */
6208  SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6209 
6210  return test_fail;
6211 }
6212 
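6213 /**
6214  * s2io_ethtool_test - conducts the self tests of the adapter.
6215  * @dev : pointer to netdev
6216  * @ethtest : pointer to an ethtool command structure.
6217  * @data : variable that returns the result of each of the tests.
6218  * Description:
6219  * Runs the register, EEPROM, BIST and RLDRAM tests offline (closing
6220  * the interface first if it is up) and the link test online.
6221  */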
6228 static void s2io_ethtool_test(struct net_device *dev,
6229  struct ethtool_test *ethtest,
6230  uint64_t *data)
6231 {
6232  struct s2io_nic *sp = netdev_priv(dev);
6233  int orig_state = netif_running(sp->dev);
6234 
6235  if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6236  /* Offline Tests. */
6237  if (orig_state)
6238  s2io_close(sp->dev);
6239 
6240  if (s2io_register_test(sp, &data[0]))
6241  ethtest->flags |= ETH_TEST_FL_FAILED;
6242 
6243  s2io_reset(sp);
6244 
6245  if (s2io_rldram_test(sp, &data[3]))
6246  ethtest->flags |= ETH_TEST_FL_FAILED;
6247 
6248  s2io_reset(sp);
6249 
6250  if (s2io_eeprom_test(sp, &data[1]))
6251  ethtest->flags |= ETH_TEST_FL_FAILED;
6252 
6253  if (s2io_bist_test(sp, &data[4]))
6254  ethtest->flags |= ETH_TEST_FL_FAILED;
6255 
6256  if (orig_state)
6257  s2io_open(sp->dev);
6258 
6259  data[2] = 0;
6260  } else {
6261  /* Online Tests. */
6262  if (!orig_state) {
6263  DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6264  dev->name);
6265  data[0] = -1;
6266  data[1] = -1;
6267  data[2] = -1;
6268  data[3] = -1;
6269  data[4] = -1;
6270  }
6271 
6272  if (s2io_link_test(sp, &data[2]))
6273  ethtest->flags |= ETH_TEST_FL_FAILED;
6274 
6275  data[0] = 0;
6276  data[1] = 0;
6277  data[3] = 0;
6278  data[4] = 0;
6279  }
6280 }
6281 
6282 static void s2io_get_ethtool_stats(struct net_device *dev,
6283  struct ethtool_stats *estats,
6284  u64 *tmp_stats)
6285 {
6286  int i = 0, k;
6287  struct s2io_nic *sp = netdev_priv(dev);
6288  struct stat_block *stats = sp->mac_control.stats_info;
6289  struct swStat *swstats = &stats->sw_stat;
6290  struct xpakStat *xstats = &stats->xpak_stat;
6291 
6292  s2io_updt_stats(sp);
6293  tmp_stats[i++] =
6294  (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6295  le32_to_cpu(stats->tmac_frms);
6296  tmp_stats[i++] =
6297  (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6298  le32_to_cpu(stats->tmac_data_octets);
6299  tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6300  tmp_stats[i++] =
6301  (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6302  le32_to_cpu(stats->tmac_mcst_frms);
6303  tmp_stats[i++] =
6304  (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6305  le32_to_cpu(stats->tmac_bcst_frms);
6306  tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6307  tmp_stats[i++] =
6308  (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6309  le32_to_cpu(stats->tmac_ttl_octets);
6310  tmp_stats[i++] =
6311  (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6312  le32_to_cpu(stats->tmac_ucst_frms);
6313  tmp_stats[i++] =
6314  (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6315  le32_to_cpu(stats->tmac_nucst_frms);
6316  tmp_stats[i++] =
6317  (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6318  le32_to_cpu(stats->tmac_any_err_frms);
6319  tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6320  tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6321  tmp_stats[i++] =
6322  (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6323  le32_to_cpu(stats->tmac_vld_ip);
6324  tmp_stats[i++] =
6325  (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6326  le32_to_cpu(stats->tmac_drop_ip);
6327  tmp_stats[i++] =
6328  (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6329  le32_to_cpu(stats->tmac_icmp);
6330  tmp_stats[i++] =
6331  (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6332  le32_to_cpu(stats->tmac_rst_tcp);
6333  tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6334  tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6335  le32_to_cpu(stats->tmac_udp);
6336  tmp_stats[i++] =
6337  (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6338  le32_to_cpu(stats->rmac_vld_frms);
6339  tmp_stats[i++] =
6340  (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6341  le32_to_cpu(stats->rmac_data_octets);
6342  tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6343  tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6344  tmp_stats[i++] =
6345  (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6346  le32_to_cpu(stats->rmac_vld_mcst_frms);
6347  tmp_stats[i++] =
6348  (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6349  le32_to_cpu(stats->rmac_vld_bcst_frms);
6350  tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6351  tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6352  tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6353  tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6354  tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6355  tmp_stats[i++] =
6356  (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6357  le32_to_cpu(stats->rmac_ttl_octets);
6358  tmp_stats[i++] =
6359  (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32 |
6360  le32_to_cpu(stats->rmac_accepted_ucst_frms);
6361  tmp_stats[i++] =
6362  (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6363  << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6364  tmp_stats[i++] =
6365  (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6366  le32_to_cpu(stats->rmac_discarded_frms);
6367  tmp_stats[i++] =
6368  (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6369  << 32 | le32_to_cpu(stats->rmac_drop_events);
6370  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6371  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6372  tmp_stats[i++] =
6373  (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6374  le32_to_cpu(stats->rmac_usized_frms);
6375  tmp_stats[i++] =
6376  (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6377  le32_to_cpu(stats->rmac_osized_frms);
6378  tmp_stats[i++] =
6379  (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6380  le32_to_cpu(stats->rmac_frag_frms);
6381  tmp_stats[i++] =
6382  (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6383  le32_to_cpu(stats->rmac_jabber_frms);
6384  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6385  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6386  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6387  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6388  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6389  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6390  tmp_stats[i++] =
6391  (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6392  le32_to_cpu(stats->rmac_ip);
6393  tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6394  tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6395  tmp_stats[i++] =
6396  (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6397  le32_to_cpu(stats->rmac_drop_ip);
6398  tmp_stats[i++] =
6399  (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6400  le32_to_cpu(stats->rmac_icmp);
6401  tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6402  tmp_stats[i++] =
6403  (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6404  le32_to_cpu(stats->rmac_udp);
6405  tmp_stats[i++] =
6406  (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6407  le32_to_cpu(stats->rmac_err_drp_udp);
6408  tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6409  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6410  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6411  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6412  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6413  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6414  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6415  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6416  tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6417  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6418  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6419  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6420  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6421  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6422  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6423  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6424  tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6425  tmp_stats[i++] =
6426  (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6427  le32_to_cpu(stats->rmac_pause_cnt);
6428  tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6429  tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6430  tmp_stats[i++] =
6431  (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6432  le32_to_cpu(stats->rmac_accepted_ip);
6433  tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6434  tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6435  tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6436  tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6437  tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6438  tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6439  tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6440  tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6441  tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6442  tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6443  tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6444  tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6445  tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6446  tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6447  tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6448  tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6449  tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6450  tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6451  tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6452 
6453  /* Enhanced statistics exist only for Hercules */
6454  if (sp->device_type == XFRAME_II_DEVICE) {
6455  tmp_stats[i++] =
6456  le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6457  tmp_stats[i++] =
6458  le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6459  tmp_stats[i++] =
6460  le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6461  tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6462  tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6463  tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6464  tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6465  tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6466  tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6467  tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6468  tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6469  tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6470  tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6471  tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6472  tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6473  tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6474  }
6475 
6476  tmp_stats[i++] = 0;
6477  tmp_stats[i++] = swstats->single_ecc_errs;
6478  tmp_stats[i++] = swstats->double_ecc_errs;
6479  tmp_stats[i++] = swstats->parity_err_cnt;
6480  tmp_stats[i++] = swstats->serious_err_cnt;
6481  tmp_stats[i++] = swstats->soft_reset_cnt;
6482  tmp_stats[i++] = swstats->fifo_full_cnt;
6483  for (k = 0; k < MAX_RX_RINGS; k++)
6484  tmp_stats[i++] = swstats->ring_full_cnt[k];
6485  tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6486  tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6487  tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6488  tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6489  tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6490  tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6491  tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6492  tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6493  tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6494  tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6495  tmp_stats[i++] = xstats->warn_laser_output_power_high;
6496  tmp_stats[i++] = xstats->warn_laser_output_power_low;
6497  tmp_stats[i++] = swstats->clubbed_frms_cnt;
6498  tmp_stats[i++] = swstats->sending_both;
6499  tmp_stats[i++] = swstats->outof_sequence_pkts;
6500  tmp_stats[i++] = swstats->flush_max_pkts;
6501  if (swstats->num_aggregations) {
6502  u64 tmp = swstats->sum_avg_pkts_aggregated;
6503  int count = 0;
6504  /*
6505  * Since 64-bit divide does not work on all platforms,
6506  * do repeated subtraction.
6507  */
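/* The loop below effectively computes
 * sum_avg_pkts_aggregated / num_aggregations, i.e. the average
 * number of packets per LRO aggregation. */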
6508  while (tmp >= swstats->num_aggregations) {
6509  tmp -= swstats->num_aggregations;
6510  count++;
6511  }
6512  tmp_stats[i++] = count;
6513  } else
6514  tmp_stats[i++] = 0;
6515  tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6516  tmp_stats[i++] = swstats->pci_map_fail_cnt;
6517  tmp_stats[i++] = swstats->watchdog_timer_cnt;
6518  tmp_stats[i++] = swstats->mem_allocated;
6519  tmp_stats[i++] = swstats->mem_freed;
6520  tmp_stats[i++] = swstats->link_up_cnt;
6521  tmp_stats[i++] = swstats->link_down_cnt;
6522  tmp_stats[i++] = swstats->link_up_time;
6523  tmp_stats[i++] = swstats->link_down_time;
6524 
6525  tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6526  tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6527  tmp_stats[i++] = swstats->tx_parity_err_cnt;
6528  tmp_stats[i++] = swstats->tx_link_loss_cnt;
6529  tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6530 
6531  tmp_stats[i++] = swstats->rx_parity_err_cnt;
6532  tmp_stats[i++] = swstats->rx_abort_cnt;
6533  tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6534  tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6535  tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6536  tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6537  tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6538  tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6539  tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6540  tmp_stats[i++] = swstats->tda_err_cnt;
6541  tmp_stats[i++] = swstats->pfc_err_cnt;
6542  tmp_stats[i++] = swstats->pcc_err_cnt;
6543  tmp_stats[i++] = swstats->tti_err_cnt;
6544  tmp_stats[i++] = swstats->tpa_err_cnt;
6545  tmp_stats[i++] = swstats->sm_err_cnt;
6546  tmp_stats[i++] = swstats->lso_err_cnt;
6547  tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6548  tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6549  tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6550  tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6551  tmp_stats[i++] = swstats->rc_err_cnt;
6552  tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6553  tmp_stats[i++] = swstats->rpa_err_cnt;
6554  tmp_stats[i++] = swstats->rda_err_cnt;
6555  tmp_stats[i++] = swstats->rti_err_cnt;
6556  tmp_stats[i++] = swstats->mc_err_cnt;
6557 }
6558 
6559 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6560 {
6561  return XENA_REG_SPACE;
6562 }
6563 
6564 
6565 static int s2io_get_eeprom_len(struct net_device *dev)
6566 {
6567  return XENA_EEPROM_SPACE;
6568 }
6569 
6570 static int s2io_get_sset_count(struct net_device *dev, int sset)
6571 {
6572  struct s2io_nic *sp = netdev_priv(dev);
6573 
6574  switch (sset) {
6575  case ETH_SS_TEST:
6576  return S2IO_TEST_LEN;
6577  case ETH_SS_STATS:
6578  switch (sp->device_type) {
6579  case XFRAME_I_DEVICE:
6580  return XFRAME_I_STAT_LEN;
6581  case XFRAME_II_DEVICE:
6582  return XFRAME_II_STAT_LEN;
6583  default:
6584  return 0;
6585  }
6586  default:
6587  return -EOPNOTSUPP;
6588  }
6589 }
6590 
6591 static void s2io_ethtool_get_strings(struct net_device *dev,
6592  u32 stringset, u8 *data)
6593 {
6594  int stat_size = 0;
6595  struct s2io_nic *sp = netdev_priv(dev);
6596 
6597  switch (stringset) {
6598  case ETH_SS_TEST:
6599  memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6600  break;
6601  case ETH_SS_STATS:
6602  stat_size = sizeof(ethtool_xena_stats_keys);
6603  memcpy(data, &ethtool_xena_stats_keys, stat_size);
6604  if (sp->device_type == XFRAME_II_DEVICE) {
6605  memcpy(data + stat_size,
6606  &ethtool_enhanced_stats_keys,
6607  sizeof(ethtool_enhanced_stats_keys));
6608  stat_size += sizeof(ethtool_enhanced_stats_keys);
6609  }
6610 
6611  memcpy(data + stat_size, &ethtool_driver_stats_keys,
6612  sizeof(ethtool_driver_stats_keys));
6613  }
6614 }
6615 
6616 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6617 {
6618  struct s2io_nic *sp = netdev_priv(dev);
6619  netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6620 
6621  if (changed && netif_running(dev)) {
6622  int rc;
6623 
6624  s2io_stop_all_tx_queue(sp);
6625  s2io_card_down(sp);
6626  dev->features = features;
6627  rc = s2io_card_up(sp);
6628  if (rc)
6629  s2io_reset(sp);
6630  else
6631  s2io_start_all_tx_queue(sp);
6632 
6633  return rc ? rc : 1;
6634  }
6635 
6636  return 0;
6637 }
6638 
6639 static const struct ethtool_ops netdev_ethtool_ops = {
6640  .get_settings = s2io_ethtool_gset,
6641  .set_settings = s2io_ethtool_sset,
6642  .get_drvinfo = s2io_ethtool_gdrvinfo,
6643  .get_regs_len = s2io_ethtool_get_regs_len,
6644  .get_regs = s2io_ethtool_gregs,
6645  .get_link = ethtool_op_get_link,
6646  .get_eeprom_len = s2io_get_eeprom_len,
6647  .get_eeprom = s2io_ethtool_geeprom,
6648  .set_eeprom = s2io_ethtool_seeprom,
6649  .get_ringparam = s2io_ethtool_gringparam,
6650  .get_pauseparam = s2io_ethtool_getpause_data,
6651  .set_pauseparam = s2io_ethtool_setpause_data,
6652  .self_test = s2io_ethtool_test,
6653  .get_strings = s2io_ethtool_get_strings,
6654  .set_phys_id = s2io_ethtool_set_led,
6655  .get_ethtool_stats = s2io_get_ethtool_stats,
6656  .get_sset_count = s2io_get_sset_count,
6657 };
6658 
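/*
 * s2io_ioctl - entry point for device-private ioctls. The driver
 * supports none, so -EOPNOTSUPP is returned unconditionally;
 * configuration is handled through the ethtool interface instead.
 */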
6671 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6672 {
6673  return -EOPNOTSUPP;
6674 }
6675 
6687 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6688 {
6689  struct s2io_nic *sp = netdev_priv(dev);
6690  int ret = 0;
6691 
6692  if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6693  DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6694  return -EPERM;
6695  }
6696 
6697  dev->mtu = new_mtu;
6698  if (netif_running(dev)) {
6699  s2io_stop_all_tx_queue(sp);
6700  s2io_card_down(sp);
6701  ret = s2io_card_up(sp);
6702  if (ret) {
6703  DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6704  __func__);
6705  return ret;
6706  }
6707  s2io_wake_all_tx_queue(sp);
6708  } else { /* Device is down */
6709  struct XENA_dev_config __iomem *bar0 = sp->bar0;
6710  u64 val64 = new_mtu;
6711 
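/* vBIT(val, 2, 14) places the 14-bit maximum payload length at bit
 * offset 2 of the 64-bit rmac_max_pyld_len register (the device uses
 * big-endian style bit numbering), so the new MTU takes effect even
 * while the interface is down. */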
6712  writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6713  }
6714 
6715  return ret;
6716 }
6717 
6724 static void s2io_set_link(struct work_struct *work)
6725 {
6726  struct s2io_nic *nic = container_of(work, struct s2io_nic,
6727  set_link_task);
6728  struct net_device *dev = nic->dev;
6729  struct XENA_dev_config __iomem *bar0 = nic->bar0;
6730  register u64 val64;
6731  u16 subid;
6732 
6733  rtnl_lock();
6734 
6735  if (!netif_running(dev))
6736  goto out_unlock;
6737 
6738  if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6739  /* The card is being reset, no point doing anything */
6740  goto out_unlock;
6741  }
6742 
6743  subid = nic->pdev->subsystem_device;
6744  if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6745  /*
6746  * Allow a small delay for the NICs self initiated
6747  * cleanup to complete.
6748  */
6749  msleep(100);
6750  }
6751 
6752  val64 = readq(&bar0->adapter_status);
6753  if (LINK_IS_UP(val64)) {
6754  if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6755  if (verify_xena_quiescence(nic)) {
6756  val64 = readq(&bar0->adapter_control);
6757  val64 |= ADAPTER_CNTL_EN;
6758  writeq(val64, &bar0->adapter_control);
6759  if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6760  nic->device_type, subid)) {
6761  val64 = readq(&bar0->gpio_control);
6762  val64 |= GPIO_CTRL_GPIO_0;
6763  writeq(val64, &bar0->gpio_control);
6764  val64 = readq(&bar0->gpio_control);
6765  } else {
6766  val64 |= ADAPTER_LED_ON;
6767  writeq(val64, &bar0->adapter_control);
6768  }
6769  nic->device_enabled_once = true;
6770  } else {
6771  DBG_PRINT(ERR_DBG,
6772  "%s: Error: device is not Quiescent\n",
6773  dev->name);
6774  s2io_stop_all_tx_queue(nic);
6775  }
6776  }
6777  val64 = readq(&bar0->adapter_control);
6778  val64 |= ADAPTER_LED_ON;
6779  writeq(val64, &bar0->adapter_control);
6780  s2io_link(nic, LINK_UP);
6781  } else {
6782  if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6783  subid)) {
6784  val64 = readq(&bar0->gpio_control);
6785  val64 &= ~GPIO_CTRL_GPIO_0;
6786  writeq(val64, &bar0->gpio_control);
6787  val64 = readq(&bar0->gpio_control);
6788  }
6789  /* turn off LED */
6790  val64 = readq(&bar0->adapter_control);
6791  val64 = val64 & (~ADAPTER_LED_ON);
6792  writeq(val64, &bar0->adapter_control);
6793  s2io_link(nic, LINK_DOWN);
6794  }
6795  clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6796 
6797 out_unlock:
6798  rtnl_unlock();
6799 }
6800 
6801 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6802  struct buffAdd *ba,
6803  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6804  u64 *temp2, int size)
6805 {
6806  struct net_device *dev = sp->dev;
6807  struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6808 
6809  if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6810  struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6811  /* allocate skb */
6812  if (*skb) {
6813  DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6814  /*
6815  * As Rx frames are not going to be processed,
6816  * reuse the same mapped address for the Rxd
6817  * buffer pointer
6818  */
6819  rxdp1->Buffer0_ptr = *temp0;
6820  } else {
6821  *skb = netdev_alloc_skb(dev, size);
6822  if (!(*skb)) {
6823  DBG_PRINT(INFO_DBG,
6824  "%s: Out of memory to allocate %s\n",
6825  dev->name, "1 buf mode SKBs");
6826  stats->mem_alloc_fail_cnt++;
6827  return -ENOMEM ;
6828  }
6829  stats->mem_allocated += (*skb)->truesize;
6830  /* store the mapped addr in a temp variable
6831  * such that it will be used for the next rxd whose
6832  * Host Control is NULL
6833  */
6834  rxdp1->Buffer0_ptr = *temp0 =
6835  pci_map_single(sp->pdev, (*skb)->data,
6836  size - NET_IP_ALIGN,
6837  PCI_DMA_FROMDEVICE);
6838  if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6839  goto memalloc_failed;
6840  rxdp->Host_Control = (unsigned long) (*skb);
6841  }
6842  } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6843  struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6844  /* Two buffer Mode */
6845  if (*skb) {
6846  rxdp3->Buffer2_ptr = *temp2;
6847  rxdp3->Buffer0_ptr = *temp0;
6848  rxdp3->Buffer1_ptr = *temp1;
6849  } else {
6850  *skb = netdev_alloc_skb(dev, size);
6851  if (!(*skb)) {
6852  DBG_PRINT(INFO_DBG,
6853  "%s: Out of memory to allocate %s\n",
6854  dev->name,
6855  "2 buf mode SKBs");
6856  stats->mem_alloc_fail_cnt++;
6857  return -ENOMEM;
6858  }
6859  stats->mem_allocated += (*skb)->truesize;
6860  rxdp3->Buffer2_ptr = *temp2 =
6861  pci_map_single(sp->pdev, (*skb)->data,
6862  dev->mtu + 4,
6863  PCI_DMA_FROMDEVICE);
6864  if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6865  goto memalloc_failed;
6866  rxdp3->Buffer0_ptr = *temp0 =
6867  pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6868  PCI_DMA_FROMDEVICE);
6869  if (pci_dma_mapping_error(sp->pdev,
6870  rxdp3->Buffer0_ptr)) {
6871  pci_unmap_single(sp->pdev,
6872  (dma_addr_t)rxdp3->Buffer2_ptr,
6873  dev->mtu + 4,
6874  PCI_DMA_FROMDEVICE);
6875  goto memalloc_failed;
6876  }
6877  rxdp->Host_Control = (unsigned long) (*skb);
6878 
6879  /* Buffer-1 will be a dummy buffer, not used */
6880  rxdp3->Buffer1_ptr = *temp1 =
6881  pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6882  PCI_DMA_FROMDEVICE);
6883  if (pci_dma_mapping_error(sp->pdev,
6884  rxdp3->Buffer1_ptr)) {
6885  pci_unmap_single(sp->pdev,
6886  (dma_addr_t)rxdp3->Buffer0_ptr,
6887  BUF0_LEN, PCI_DMA_FROMDEVICE);
6888  pci_unmap_single(sp->pdev,
6889  (dma_addr_t)rxdp3->Buffer2_ptr,
6890  dev->mtu + 4,
6891  PCI_DMA_FROMDEVICE);
6892  goto memalloc_failed;
6893  }
6894  }
6895  }
6896  return 0;
6897 
6898 memalloc_failed:
6899  stats->pci_map_fail_cnt++;
6900  stats->mem_freed += (*skb)->truesize;
6901  dev_kfree_skb(*skb);
6902  return -ENOMEM;
6903 }
6904 
6905 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6906  int size)
6907 {
6908  struct net_device *dev = sp->dev;
6909  if (sp->rxd_mode == RXD_MODE_1) {
6910  rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6911  } else if (sp->rxd_mode == RXD_MODE_3B) {
6912  rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6913  rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6914  rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6915  }
6916 }
6917 
6918 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6919 {
6920  int i, j, k, blk_cnt = 0, size;
6921  struct config_param *config = &sp->config;
6922  struct mac_info *mac_control = &sp->mac_control;
6923  struct net_device *dev = sp->dev;
6924  struct RxD_t *rxdp = NULL;
6925  struct sk_buff *skb = NULL;
6926  struct buffAdd *ba = NULL;
6927  u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6928 
6929  /* Calculate the size based on ring mode */
6930  size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6931  HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6932  if (sp->rxd_mode == RXD_MODE_1)
6933  size += NET_IP_ALIGN;
6934  else if (sp->rxd_mode == RXD_MODE_3B)
6935  size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6936 
6937  for (i = 0; i < config->rx_ring_num; i++) {
6938  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6939  struct ring_info *ring = &mac_control->rings[i];
6940 
6941  blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6942 
6943  for (j = 0; j < blk_cnt; j++) {
6944  for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6945  rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6946  if (sp->rxd_mode == RXD_MODE_3B)
6947  ba = &ring->ba[j][k];
6948  if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6949  &temp0_64,
6950  &temp1_64,
6951  &temp2_64,
6952  size) == -ENOMEM) {
6953  return 0;
6954  }
6955 
6956  set_rxd_buffer_size(sp, rxdp, size);
6957  wmb();
6958  /* flip the Ownership bit to Hardware */
6959  rxdp->Control_1 |= RXD_OWN_XENA;
6960  }
6961  }
6962  }
6963  return 0;
6964 
6965 }
6966 
6967 static int s2io_add_isr(struct s2io_nic *sp)
6968 {
6969  int ret = 0;
6970  struct net_device *dev = sp->dev;
6971  int err = 0;
6972 
6973  if (sp->config.intr_type == MSI_X)
6974  ret = s2io_enable_msi_x(sp);
6975  if (ret) {
6976  DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6977  sp->config.intr_type = INTA;
6978  }
6979 
6980  /*
6981  * Store the values of the MSIX table in
6982  * the struct s2io_nic structure
6983  */
6984  store_xmsi_data(sp);
6985 
6986  /* After proper initialization of H/W, register ISR */
6987  if (sp->config.intr_type == MSI_X) {
6988  int i, msix_rx_cnt = 0;
6989 
6990  for (i = 0; i < sp->num_entries; i++) {
6991  if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6992  if (sp->s2io_entries[i].type ==
6993  MSIX_RING_TYPE) {
6994  sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6995  dev->name, i);
6996  err = request_irq(sp->entries[i].vector,
6997  s2io_msix_ring_handle,
6998  0,
6999  sp->desc[i],
7000  sp->s2io_entries[i].arg);
7001  } else if (sp->s2io_entries[i].type ==
7002  MSIX_ALARM_TYPE) {
7003  sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7004  dev->name, i);
7005  err = request_irq(sp->entries[i].vector,
7006  s2io_msix_fifo_handle,
7007  0,
7008  sp->desc[i],
7009  sp->s2io_entries[i].arg);
7010 
7011  }
7012  /* if either data or addr is zero print it. */
7013  if (!(sp->msix_info[i].addr &&
7014  sp->msix_info[i].data)) {
7015  DBG_PRINT(ERR_DBG,
7016  "%s @Addr:0x%llx Data:0x%llx\n",
7017  sp->desc[i],
7018  (unsigned long long)
7019  sp->msix_info[i].addr,
7020  (unsigned long long)
7021  ntohl(sp->msix_info[i].data));
7022  } else
7023  msix_rx_cnt++;
7024  if (err) {
7025  remove_msix_isr(sp);
7026 
7027  DBG_PRINT(ERR_DBG,
7028  "%s:MSI-X-%d registration "
7029  "failed\n", dev->name, i);
7030 
7031  DBG_PRINT(ERR_DBG,
7032  "%s: Defaulting to INTA\n",
7033  dev->name);
7034  sp->config.intr_type = INTA;
7035  break;
7036  }
7037  sp->s2io_entries[i].in_use =
7038  MSIX_REGISTERED_SUCCESS;
7039  }
7040  }
7041  if (!err) {
7042  pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7043  DBG_PRINT(INFO_DBG,
7044  "MSI-X-TX entries enabled through alarm vector\n");
7045  }
7046  }
7047  if (sp->config.intr_type == INTA) {
7048  err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7049  sp->name, dev);
7050  if (err) {
7051  DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7052  dev->name);
7053  return -1;
7054  }
7055  }
7056  return 0;
7057 }
7058 
7059 static void s2io_rem_isr(struct s2io_nic *sp)
7060 {
7061  if (sp->config.intr_type == MSI_X)
7062  remove_msix_isr(sp);
7063  else
7064  remove_inta_isr(sp);
7065 }
7066 
7067 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7068 {
7069  int cnt = 0;
7070  struct XENA_dev_config __iomem *bar0 = sp->bar0;
7071  register u64 val64 = 0;
7072  struct config_param *config;
7073  config = &sp->config;
7074 
7075  if (!is_s2io_card_up(sp))
7076  return;
7077 
7078  del_timer_sync(&sp->alarm_timer);
7079  /* If s2io_set_link task is executing, wait till it completes. */
7080  while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7081  msleep(50);
7082  clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7083 
7084  /* Disable napi */
7085  if (sp->config.napi) {
7086  int off = 0;
7087  if (config->intr_type == MSI_X) {
7088  for (; off < sp->config.rx_ring_num; off++)
7089  napi_disable(&sp->mac_control.rings[off].napi);
7090  }
7091  else
7092  napi_disable(&sp->napi);
7093  }
7094 
7095  /* disable Tx and Rx traffic on the NIC */
7096  if (do_io)
7097  stop_nic(sp);
7098 
7099  s2io_rem_isr(sp);
7100 
7101  /* stop the tx queue, indicate link down */
7102  s2io_link(sp, LINK_DOWN);
7103 
7104  /* Check if the device is Quiescent and then Reset the NIC */
7105  while (do_io) {
7106  /* As per the HW requirement we need to replenish the
7107  * receive buffer to avoid the ring bump. Since there is
7108  * no intention of processing the Rx frame at this point, we are
7109  * just setting the ownership bit of rxd in Each Rx
7110  * ring to HW and set the appropriate buffer size
7111  * based on the ring mode
7112  */
7113  rxd_owner_bit_reset(sp);
7114 
7115  val64 = readq(&bar0->adapter_status);
7116  if (verify_xena_quiescence(sp)) {
7117  if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7118  break;
7119  }
7120 
7121  msleep(50);
7122  cnt++;
7123  if (cnt == 10) {
7124  DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7125  "adapter status reads 0x%llx\n",
7126  (unsigned long long)val64);
7127  break;
7128  }
7129  }
7130  if (do_io)
7131  s2io_reset(sp);
7132 
7133  /* Free all Tx buffers */
7134  free_tx_buffers(sp);
7135 
7136  /* Free all Rx buffers */
7137  free_rx_buffers(sp);
7138 
7139  clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7140  }
7141 
7142 static void s2io_card_down(struct s2io_nic *sp)
7143 {
7144  do_s2io_card_down(sp, 1);
7145 }
7146 
7147 static int s2io_card_up(struct s2io_nic *sp)
7148 {
7149  int i, ret = 0;
7150  struct config_param *config;
7151  struct mac_info *mac_control;
7152  struct net_device *dev = sp->dev;
7153  u16 interruptible;
7154 
7155  /* Initialize the H/W I/O registers */
7156  ret = init_nic(sp);
7157  if (ret != 0) {
7158  DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7159  dev->name);
7160  if (ret != -EIO)
7161  s2io_reset(sp);
7162  return ret;
7163  }
7164 
7165  /*
7166  * Initializing the Rx buffers. For now we are considering only 1
7167  * Rx ring and initializing buffers into 30 Rx blocks
7168  */
7169  config = &sp->config;
7170  mac_control = &sp->mac_control;
7171 
7172  for (i = 0; i < config->rx_ring_num; i++) {
7173  struct ring_info *ring = &mac_control->rings[i];
7174 
7175  ring->mtu = dev->mtu;
7176  ring->lro = !!(dev->features & NETIF_F_LRO);
7177  ret = fill_rx_buffers(sp, ring, 1);
7178  if (ret) {
7179  DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7180  dev->name);
7181  s2io_reset(sp);
7182  free_rx_buffers(sp);
7183  return -ENOMEM;
7184  }
7185  DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7186  ring->rx_bufs_left);
7187  }
7188 
7189  /* Initialise napi */
7190  if (config->napi) {
7191  if (config->intr_type == MSI_X) {
7192  for (i = 0; i < sp->config.rx_ring_num; i++)
7193  napi_enable(&sp->mac_control.rings[i].napi);
7194  } else {
7195  napi_enable(&sp->napi);
7196  }
7197  }
7198 
7199  /* Maintain the state prior to the open */
7200  if (sp->promisc_flg)
7201  sp->promisc_flg = 0;
7202  if (sp->m_cast_flg) {
7203  sp->m_cast_flg = 0;
7204  sp->all_multi_pos = 0;
7205  }
7206 
7207  /* Setting its receive mode */
7208  s2io_set_multicast(dev);
7209 
7210  if (dev->features & NETIF_F_LRO) {
7211  /* Initialize max aggregatable pkts per session based on MTU */
7212  sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
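/* The IP tot_len field is only 16 bits, so an aggregated super-frame
 * must stay below 64KB; hence the ((1 << 16) - 1) / MTU bound. */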
7213  /* Check if we can use (if specified) user provided value */
7214  if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7215  sp->lro_max_aggr_per_sess = lro_max_pkts;
7216  }
7217 
7218  /* Enable Rx Traffic and interrupts on the NIC */
7219  if (start_nic(sp)) {
7220  DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7221  s2io_reset(sp);
7222  free_rx_buffers(sp);
7223  return -ENODEV;
7224  }
7225 
7226  /* Add interrupt service routine */
7227  if (s2io_add_isr(sp) != 0) {
7228  if (sp->config.intr_type == MSI_X)
7229  s2io_rem_isr(sp);
7230  s2io_reset(sp);
7231  free_rx_buffers(sp);
7232  return -ENODEV;
7233  }
7234 
7235  S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7236 
7237  set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7238 
7239  /* Enable select interrupts */
7240  en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7241  if (sp->config.intr_type != INTA) {
7242  interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7243  en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7244  } else {
7245  interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7246  interruptible |= TX_PIC_INTR;
7247  en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7248  }
7249 
7250  return 0;
7251 }
7252 
7263 static void s2io_restart_nic(struct work_struct *work)
7264 {
7265  struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7266  struct net_device *dev = sp->dev;
7267 
7268  rtnl_lock();
7269 
7270  if (!netif_running(dev))
7271  goto out_unlock;
7272 
7273  s2io_card_down(sp);
7274  if (s2io_card_up(sp)) {
7275  DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7276  }
7277  s2io_wake_all_tx_queue(sp);
7278  DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7279 out_unlock:
7280  rtnl_unlock();
7281 }
7282 
7296 static void s2io_tx_watchdog(struct net_device *dev)
7297 {
7298  struct s2io_nic *sp = netdev_priv(dev);
7299  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7300 
7301  if (netif_carrier_ok(dev)) {
7302  swstats->watchdog_timer_cnt++;
7303  schedule_work(&sp->rst_timer_task);
7304  swstats->soft_reset_cnt++;
7305  }
7306 }
7307 
7325 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7326 {
7327  struct s2io_nic *sp = ring_data->nic;
7328  struct net_device *dev = ring_data->dev;
7329  struct sk_buff *skb = (struct sk_buff *)
7330  ((unsigned long)rxdp->Host_Control);
7331  int ring_no = ring_data->ring_no;
7332  u16 l3_csum, l4_csum;
7333  unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7334  struct lro *uninitialized_var(lro);
7335  u8 err_mask;
7336  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7337 
7338  skb->dev = dev;
7339 
7340  if (err) {
7341  /* Check for parity error */
7342  if (err & 0x1)
7343  swstats->parity_err_cnt++;
7344 
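/* The 4-bit transfer code (RXD_T_CODE) occupies bits 48-51 of
 * Control_1; the switch below maps each code to its error counter. */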
7345  err_mask = err >> 48;
7346  switch (err_mask) {
7347  case 1:
7348  swstats->rx_parity_err_cnt++;
7349  break;
7350 
7351  case 2:
7352  swstats->rx_abort_cnt++;
7353  break;
7354 
7355  case 3:
7356  swstats->rx_parity_abort_cnt++;
7357  break;
7358 
7359  case 4:
7360  swstats->rx_rda_fail_cnt++;
7361  break;
7362 
7363  case 5:
7364  swstats->rx_unkn_prot_cnt++;
7365  break;
7366 
7367  case 6:
7368  swstats->rx_fcs_err_cnt++;
7369  break;
7370 
7371  case 7:
7372  swstats->rx_buf_size_err_cnt++;
7373  break;
7374 
7375  case 8:
7376  swstats->rx_rxd_corrupt_cnt++;
7377  break;
7378 
7379  case 15:
7380  swstats->rx_unkn_err_cnt++;
7381  break;
7382  }
7383  /*
7384  * Drop the packet if bad transfer code. Exception being
7385  * 0x5, which could be due to unsupported IPv6 extension header.
7386  * In this case, we let the stack handle the packet.
7387  * Note that in this case, since checksum will be incorrect,
7388  * stack will validate the same.
7389  */
7390  if (err_mask != 0x5) {
7391  DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7392  dev->name, err_mask);
7393  dev->stats.rx_crc_errors++;
7394  swstats->mem_freed
7395  += skb->truesize;
7396  dev_kfree_skb(skb);
7397  ring_data->rx_bufs_left -= 1;
7398  rxdp->Host_Control = 0;
7399  return 0;
7400  }
7401  }
7402 
7403  rxdp->Host_Control = 0;
7404  if (sp->rxd_mode == RXD_MODE_1) {
7405  int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7406 
7407  skb_put(skb, len);
7408  } else if (sp->rxd_mode == RXD_MODE_3B) {
7409  int get_block = ring_data->rx_curr_get_info.block_index;
7410  int get_off = ring_data->rx_curr_get_info.offset;
7411  int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7412  int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
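/* In 2-buffer mode buffer 0 holds the frame headers and buffer 2 the
 * payload (already DMA'd into the skb data area); push the header
 * bytes in front of the payload before passing the skb up. */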
7413  unsigned char *buff = skb_push(skb, buf0_len);
7414 
7415  struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7416  memcpy(buff, ba->ba_0, buf0_len);
7417  skb_put(skb, buf2_len);
7418  }
7419 
7420  if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7421  ((!ring_data->lro) ||
7422  (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7423  (dev->features & NETIF_F_RXCSUM)) {
7424  l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7425  l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7426  if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7427  /*
7428  * NIC verifies if the Checksum of the received
7429  * frame is Ok or not and accordingly returns
7430  * a flag in the RxD.
7431  */
7432  skb->ip_summed = CHECKSUM_UNNECESSARY;
7433  if (ring_data->lro) {
7434  u32 tcp_len = 0;
7435  u8 *tcp;
7436  int ret = 0;
7437 
7438  ret = s2io_club_tcp_session(ring_data,
7439  skb->data, &tcp,
7440  &tcp_len, &lro,
7441  rxdp, sp);
7442  switch (ret) {
7443  case 3: /* Begin anew */
7444  lro->parent = skb;
7445  goto aggregate;
7446  case 1: /* Aggregate */
7447  lro_append_pkt(sp, lro, skb, tcp_len);
7448  goto aggregate;
7449  case 4: /* Flush session */
7450  lro_append_pkt(sp, lro, skb, tcp_len);
7451  queue_rx_frame(lro->parent,
7452  lro->vlan_tag);
7453  clear_lro_session(lro);
7454  swstats->flush_max_pkts++;
7455  goto aggregate;
7456  case 2: /* Flush both */
7457  lro->parent->data_len = lro->frags_len;
7458  swstats->sending_both++;
7459  queue_rx_frame(lro->parent,
7460  lro->vlan_tag);
7461  clear_lro_session(lro);
7462  goto send_up;
7463  case 0: /* sessions exceeded */
7464  case -1: /* non-TCP or not L2 aggregatable */
7465  case 5: /*
7466  * First pkt in session not
7467  * L3/L4 aggregatable
7468  */
7469  break;
7470  default:
7471  DBG_PRINT(ERR_DBG,
7472  "%s: Samadhana!!\n",
7473  __func__);
7474  BUG();
7475  }
7476  }
7477  } else {
7478  /*
7479  * Packet with erroneous checksum, let the
7480  * upper layers deal with it.
7481  */
7482  skb_checksum_none_assert(skb);
7483  }
7484  } else
7485  skb_checksum_none_assert(skb);
7486 
7487  swstats->mem_freed += skb->truesize;
7488 send_up:
7489  skb_record_rx_queue(skb, ring_no);
7490  queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7491 aggregate:
7492  sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7493  return SUCCESS;
7494 }
7495 
7509 static void s2io_link(struct s2io_nic *sp, int link)
7510 {
7511  struct net_device *dev = sp->dev;
7512  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7513 
7514  if (link != sp->last_link_state) {
7515  init_tti(sp, link);
7516  if (link == LINK_DOWN) {
7517  DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7518  s2io_stop_all_tx_queue(sp);
7519  netif_carrier_off(dev);
7520  if (swstats->link_up_cnt)
7521  swstats->link_up_time =
7522  jiffies - sp->start_time;
7523  swstats->link_down_cnt++;
7524  } else {
7525  DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7526  if (swstats->link_down_cnt)
7527  swstats->link_down_time =
7528  jiffies - sp->start_time;
7529  swstats->link_up_cnt++;
7530  netif_carrier_on(dev);
7531  s2io_wake_all_tx_queue(sp);
7532  }
7533  }
7534  sp->last_link_state = link;
7535  sp->start_time = jiffies;
7536 }
7537 
7549 static void s2io_init_pci(struct s2io_nic *sp)
7550 {
7551  u16 pci_cmd = 0, pcix_cmd = 0;
7552 
7553  /* Enable Data Parity Error Recovery in PCI-X command register. */
7554  pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7555  &(pcix_cmd));
7556  pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7557  (pcix_cmd | 1));
7558  pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7559  &(pcix_cmd));
7560 
7561  /* Set the PErr Response bit in PCI command register. */
7562  pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7563  pci_write_config_word(sp->pdev, PCI_COMMAND,
7564  (pci_cmd | PCI_COMMAND_PARITY));
7565  pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7566 }
7567 
7568 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7569  u8 *dev_multiq)
7570 {
7571  int i;
7572 
7573  if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7574  DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7575  "(%d) not supported\n", tx_fifo_num);
7576 
7577  if (tx_fifo_num < 1)
7578  tx_fifo_num = 1;
7579  else
7580  tx_fifo_num = MAX_TX_FIFOS;
7581 
7582  DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7583  }
7584 
7585  if (multiq)
7586  *dev_multiq = multiq;
7587 
7588  if (tx_steering_type && (1 == tx_fifo_num)) {
7589  if (tx_steering_type != TX_DEFAULT_STEERING)
7590  DBG_PRINT(ERR_DBG,
7591  "Tx steering is not supported with "
7592  "one fifo. Disabling Tx steering.\n");
7593  tx_steering_type = NO_STEERING;
7594  }
7595 
7596  if ((tx_steering_type < NO_STEERING) ||
7597  (tx_steering_type > TX_DEFAULT_STEERING)) {
7598  DBG_PRINT(ERR_DBG,
7599  "Requested transmit steering not supported\n");
7600  DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7601  tx_steering_type = NO_STEERING;
7602  }
7603 
7604  if (rx_ring_num > MAX_RX_RINGS) {
7605  DBG_PRINT(ERR_DBG,
7606  "Requested number of rx rings not supported\n");
7607  DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7608  MAX_RX_RINGS);
7609  rx_ring_num = MAX_RX_RINGS;
7610  }
7611 
7612  if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7613  DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7614  "Defaulting to INTA\n");
7615  *dev_intr_type = INTA;
7616  }
7617 
7618  if ((*dev_intr_type == MSI_X) &&
7619  ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7620  (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7621  DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7622  "Defaulting to INTA\n");
7623  *dev_intr_type = INTA;
7624  }
7625 
7626  if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7627  DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7628  DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7629  rx_ring_mode = 1;
7630  }
7631 
7632  for (i = 0; i < MAX_RX_RINGS; i++)
7633  if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7634  DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7635  "supported\nDefaulting to %d\n",
7636  MAX_RX_BLOCKS_PER_RING);
7637  rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7638  }
7639 
7640  return SUCCESS;
7641 }
7642 
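/*
 * rts_ds_steer - programs the receive traffic steering table so that
 * frames carrying the given 6-bit DS codepoint (hence the > 63 check)
 * are routed to the specified Rx ring.
 */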
7652 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7653 {
7654  struct XENA_dev_config __iomem *bar0 = nic->bar0;
7655  register u64 val64 = 0;
7656 
7657  if (ds_codepoint > 63)
7658  return FAILURE;
7659 
7660  val64 = RTS_DS_MEM_DATA(ring);
7661  writeq(val64, &bar0->rts_ds_mem_data);
7662 
7663  val64 = RTS_DS_MEM_CTRL_WE |
7664  RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7665  RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7666 
7667  writeq(val64, &bar0->rts_ds_mem_ctrl);
7668 
7669  return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7670  RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7671  S2IO_BIT_RESET);
7672 }
7673 
7674 static const struct net_device_ops s2io_netdev_ops = {
7675  .ndo_open = s2io_open,
7676  .ndo_stop = s2io_close,
7677  .ndo_get_stats = s2io_get_stats,
7678  .ndo_start_xmit = s2io_xmit,
7679  .ndo_validate_addr = eth_validate_addr,
7680  .ndo_set_rx_mode = s2io_set_multicast,
7681  .ndo_do_ioctl = s2io_ioctl,
7682  .ndo_set_mac_address = s2io_set_mac_addr,
7683  .ndo_change_mtu = s2io_change_mtu,
7684  .ndo_set_features = s2io_set_features,
7685  .ndo_tx_timeout = s2io_tx_watchdog,
7686 #ifdef CONFIG_NET_POLL_CONTROLLER
7687  .ndo_poll_controller = s2io_netpoll,
7688 #endif
7689 };
7690 
7705 static int __devinit
7706 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7707 {
7708  struct s2io_nic *sp;
7709  struct net_device *dev;
7710  int i, j, ret;
7711  int dma_flag = false;
7712  u32 mac_up, mac_down;
7713  u64 val64 = 0, tmp64 = 0;
7714  struct XENA_dev_config __iomem *bar0 = NULL;
7715  u16 subid;
7716  struct config_param *config;
7717  struct mac_info *mac_control;
7718  int mode;
7719  u8 dev_intr_type = intr_type;
7720  u8 dev_multiq = 0;
7721 
7722  ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7723  if (ret)
7724  return ret;
7725 
7726  ret = pci_enable_device(pdev);
7727  if (ret) {
7728  DBG_PRINT(ERR_DBG,
7729  "%s: pci_enable_device failed\n", __func__);
7730  return ret;
7731  }
7732 
7733  if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7734  DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7735  dma_flag = true;
7736  if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7737  DBG_PRINT(ERR_DBG,
7738  "Unable to obtain 64bit DMA "
7739  "for consistent allocations\n");
7740  pci_disable_device(pdev);
7741  return -ENOMEM;
7742  }
7743  } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7744  DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7745  } else {
7746  pci_disable_device(pdev);
7747  return -ENOMEM;
7748  }
7749  ret = pci_request_regions(pdev, s2io_driver_name);
7750  if (ret) {
7751  DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7752  __func__, ret);
7753  pci_disable_device(pdev);
7754  return -ENODEV;
7755  }
7756  if (dev_multiq)
7757  dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7758  else
7759  dev = alloc_etherdev(sizeof(struct s2io_nic));
7760  if (dev == NULL) {
7761  pci_disable_device(pdev);
7762  pci_release_regions(pdev);
7763  return -ENODEV;
7764  }
7765 
7766  pci_set_master(pdev);
7767  pci_set_drvdata(pdev, dev);
7768  SET_NETDEV_DEV(dev, &pdev->dev);
7769 
7770  /* Private member variable initialized to s2io NIC structure */
7771  sp = netdev_priv(dev);
7772  sp->dev = dev;
7773  sp->pdev = pdev;
7774  sp->high_dma_flag = dma_flag;
7775  sp->device_enabled_once = false;
7776  if (rx_ring_mode == 1)
7777  sp->rxd_mode = RXD_MODE_1;
7778  if (rx_ring_mode == 2)
7779  sp->rxd_mode = RXD_MODE_3B;
7780 
7781  sp->config.intr_type = dev_intr_type;
7782 
7783  if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7784  (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7785  sp->device_type = XFRAME_II_DEVICE;
7786  else
7787  sp->device_type = XFRAME_I_DEVICE;
7788 
7789 
7790  /* Initialize some PCI/PCI-X fields of the NIC. */
7791  s2io_init_pci(sp);
7792 
7793  /*
7794  * Setting the device configuration parameters.
7795  * Most of these parameters can be specified by the user during
7796  * module insertion as they are module loadable parameters. If
7797  * these parameters are not specified during load time, they
7798  * are initialized with default values.
7799  */
7800  config = &sp->config;
7801  mac_control = &sp->mac_control;
7802 
7803  config->napi = napi;
7804  config->tx_steering_type = tx_steering_type;
7805 
7806  /* Tx side parameters. */
7807  if (config->tx_steering_type == TX_PRIORITY_STEERING)
7808  config->tx_fifo_num = MAX_TX_FIFOS;
7809  else
7810  config->tx_fifo_num = tx_fifo_num;
7811 
7812  /* Initialize the fifos used for tx steering */
7813  if (config->tx_fifo_num < 5) {
7814  if (config->tx_fifo_num == 1)
7815  sp->total_tcp_fifos = 1;
7816  else
7817  sp->total_tcp_fifos = config->tx_fifo_num - 1;
7818  sp->udp_fifo_idx = config->tx_fifo_num - 1;
7819  sp->total_udp_fifos = 1;
7820  sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7821  } else {
7822  sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7823  FIFO_OTHER_MAX_NUM);
7824  sp->udp_fifo_idx = sp->total_tcp_fifos;
7825  sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7826  sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7827  }
7828 
7829  config->multiq = dev_multiq;
7830  for (i = 0; i < config->tx_fifo_num; i++) {
7831  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7832 
7833  tx_cfg->fifo_len = tx_fifo_len[i];
7834  tx_cfg->fifo_priority = i;
7835  }
7836 
7837  /* mapping the QoS priority to the configured fifos */
7838  for (i = 0; i < MAX_TX_FIFOS; i++)
7839  config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7840 
7841  /* map the hashing selector table to the configured fifos */
7842  for (i = 0; i < config->tx_fifo_num; i++)
7843  sp->fifo_selector[i] = fifo_selector[i];
7844 
7845 
7846  config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7847  for (i = 0; i < config->tx_fifo_num; i++) {
7848  struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7849 
7850  tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7851  if (tx_cfg->fifo_len < 65) {
7852  config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7853  break;
7854  }
7855  }
7856  /* + 2 because one Txd for skb->data and one Txd for UFO */
7857  config->max_txds = MAX_SKB_FRAGS + 2;
7858 
7859  /* Rx side parameters. */
7860  config->rx_ring_num = rx_ring_num;
7861  for (i = 0; i < config->rx_ring_num; i++) {
7862  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7863  struct ring_info *ring = &mac_control->rings[i];
7864 
7865  rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7866  rx_cfg->ring_priority = i;
7867  ring->rx_bufs_left = 0;
7868  ring->rxd_mode = sp->rxd_mode;
7869  ring->rxd_count = rxd_count[sp->rxd_mode];
7870  ring->pdev = sp->pdev;
7871  ring->dev = sp->dev;
7872  }
7873 
7874  for (i = 0; i < rx_ring_num; i++) {
7875  struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7876 
7877  rx_cfg->ring_org = RING_ORG_BUFF1;
7878  rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7879  }
7880 
7881  /* Setting Mac Control parameters */
7882  mac_control->rmac_pause_time = rmac_pause_time;
7883  mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7884  mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7885 
7886 
7887  /* initialize the shared memory used by the NIC and the host */
7888  if (init_shared_mem(sp)) {
7889  DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7890  ret = -ENOMEM;
7891  goto mem_alloc_failed;
7892  }
7893 
7894  sp->bar0 = pci_ioremap_bar(pdev, 0);
7895  if (!sp->bar0) {
7896  DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7897  dev->name);
7898  ret = -ENOMEM;
7899  goto bar0_remap_failed;
7900  }
7901 
7902  sp->bar1 = pci_ioremap_bar(pdev, 2);
7903  if (!sp->bar1) {
7904  DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7905  dev->name);
7906  ret = -ENOMEM;
7907  goto bar1_remap_failed;
7908  }
7909 
7910  /* Initializing the BAR1 address as the start of the FIFO pointer. */
7911  for (j = 0; j < MAX_TX_FIFOS; j++) {
7912  mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7913  }
7914 
7915  /* Driver entry points */
7916  dev->netdev_ops = &s2io_netdev_ops;
7917  SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7918  dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7919  NETIF_F_TSO | NETIF_F_TSO6 |
7920  NETIF_F_RXCSUM | NETIF_F_LRO;
7921  dev->features |= dev->hw_features |
7922  NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7923  if (sp->device_type & XFRAME_II_DEVICE) {
7924  dev->hw_features |= NETIF_F_UFO;
7925  if (ufo)
7926  dev->features |= NETIF_F_UFO;
7927  }
7928  if (sp->high_dma_flag == true)
7929  dev->features |= NETIF_F_HIGHDMA;
7930  dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7931  INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7932  INIT_WORK(&sp->set_link_task, s2io_set_link);
7933 
7934  pci_save_state(sp->pdev);
7935 
7936  /* Setting swapper control on the NIC, for proper reset operation */
7937  if (s2io_set_swapper(sp)) {
7938  DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7939  dev->name);
7940  ret = -EAGAIN;
7941  goto set_swap_failed;
7942  }
7943 
7944  /* Verify if the Herc works in the slot it's placed into */
7945  if (sp->device_type & XFRAME_II_DEVICE) {
7946  mode = s2io_verify_pci_mode(sp);
7947  if (mode < 0) {
7948  DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7949  __func__);
7950  ret = -EBADSLT;
7951  goto set_swap_failed;
7952  }
7953  }
7954 
7955  if (sp->config.intr_type == MSI_X) {
7956  sp->num_entries = config->rx_ring_num + 1;
7957  ret = s2io_enable_msi_x(sp);
7958 
7959  if (!ret) {
7960  ret = s2io_test_msi(sp);
7961  /* rollback MSI-X, will re-enable during add_isr() */
7962  remove_msix_isr(sp);
7963  }
7964  if (ret) {
7965 
7966  DBG_PRINT(ERR_DBG,
7967  "MSI-X requested but failed to enable\n");
7968  sp->config.intr_type = INTA;
7969  }
7970  }
7971 
7972  if (config->intr_type == MSI_X) {
7973  for (i = 0; i < config->rx_ring_num ; i++) {
7974  struct ring_info *ring = &mac_control->rings[i];
7975 
7976  netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7977  }
7978  } else {
7979  netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7980  }
7981 
7982  /* Not needed for Herc */
7983  if (sp->device_type & XFRAME_I_DEVICE) {
7984  /*
7985  * Fix for all "FFs" MAC address problems observed on
7986  * Alpha platforms
7987  */
7988  fix_mac_address(sp);
7989  s2io_reset(sp);
7990  }
7991 
7992  /*
7993  * MAC address initialization.
7994  * For now only one mac address will be read and used.
7995  */
7996  bar0 = sp->bar0;
7997  val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7998  RMAC_ADDR_CMD_MEM_OFFSET(0);
7999  writeq(val64, &bar0->rmac_addr_cmd_mem);
8000  wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8001  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8002  S2IO_BIT_RESET);
8003  tmp64 = readq(&bar0->rmac_addr_data0_mem);
8004  mac_down = (u32)tmp64;
8005  mac_up = (u32) (tmp64 >> 32);
8006 
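/* The 48-bit MAC address is returned in rmac_addr_data0_mem with the
 * first octet in the most significant byte, hence the byte-wise
 * shifts below when unpacking it into def_mac_addr. */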
8007  sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8008  sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8009  sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8010  sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8011  sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8012  sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8013 
8014  /* Set the factory defined MAC address initially */
8015  dev->addr_len = ETH_ALEN;
8016  memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8017  memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8018 
8019  /* initialize number of multicast & unicast MAC entries variables */
8020  if (sp->device_type == XFRAME_I_DEVICE) {
8021  config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8022  config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8023  config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8024  } else if (sp->device_type == XFRAME_II_DEVICE) {
8025  config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8026  config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8027  config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8028  }
8029 
8030  /* store mac addresses from CAM to s2io_nic structure */
8031  do_s2io_store_unicast_mc(sp);
8032 
8033  /* Configure MSIX vector for number of rings configured plus one */
8034  if ((sp->device_type == XFRAME_II_DEVICE) &&
8035  (config->intr_type == MSI_X))
8036  sp->num_entries = config->rx_ring_num + 1;
8037 
8038  /* Store the values of the MSIX table in the s2io_nic structure */
8039  store_xmsi_data(sp);
8040  /* reset Nic and bring it to known state */
8041  s2io_reset(sp);
8042 
8043  /*
8044  * Initialize link state flags
8045  * and the card state parameter
8046  */
8047  sp->state = 0;
8048 
8049  /* Initialize spinlocks */
8050  for (i = 0; i < sp->config.tx_fifo_num; i++) {
8051  struct fifo_info *fifo = &mac_control->fifos[i];
8052 
8053  spin_lock_init(&fifo->tx_lock);
8054  }
8055 
8056  /*
8057  * SXE-002: Configure link and activity LED to init state
8058  * on driver load.
8059  */
8060  subid = sp->pdev->subsystem_device;
8061  if ((subid & 0xFF) >= 0x07) {
8062  val64 = readq(&bar0->gpio_control);
8063  val64 |= 0x0000800000000000ULL;
8064  writeq(val64, &bar0->gpio_control);
8065  val64 = 0x0411040400000000ULL;
8066  writeq(val64, (void __iomem *)bar0 + 0x2700);
8067  val64 = readq(&bar0->gpio_control);
8068  }
8069 
8070  sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8071 
8072  if (register_netdev(dev)) {
8073  DBG_PRINT(ERR_DBG, "Device registration failed\n");
8074  ret = -ENODEV;
8075  goto register_failed;
8076  }
8077  s2io_vpd_read(sp);
8078  DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8079  DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8080  sp->product_name, pdev->revision);
8081  DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8082  s2io_driver_version);
8083  DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8084  DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8085  if (sp->device_type & XFRAME_II_DEVICE) {
8086  mode = s2io_print_pci_mode(sp);
8087  if (mode < 0) {
8088  ret = -EBADSLT;
8089  unregister_netdev(dev);
8090  goto set_swap_failed;
8091  }
8092  }
8093  switch (sp->rxd_mode) {
8094  case RXD_MODE_1:
8095  DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8096  dev->name);
8097  break;
8098  case RXD_MODE_3B:
8099  DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8100  dev->name);
8101  break;
8102  }
8103 
8104  switch (sp->config.napi) {
8105  case 0:
8106  DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8107  break;
8108  case 1:
8109  DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8110  break;
8111  }
8112 
8113  DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8114  sp->config.tx_fifo_num);
8115 
8116  DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8117  sp->config.rx_ring_num);
8118 
8119  switch (sp->config.intr_type) {
8120  case INTA:
8121  DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8122  break;
8123  case MSI_X:
8124  DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8125  break;
8126  }
8127  if (sp->config.multiq) {
8128  for (i = 0; i < sp->config.tx_fifo_num; i++) {
8129  struct fifo_info *fifo = &mac_control->fifos[i];
8130 
8131  fifo->multiq = config->multiq;
8132  }
8133  DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8134  dev->name);
8135  } else
8136  DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8137  dev->name);
8138 
8139  switch (sp->config.tx_steering_type) {
8140  case NO_STEERING:
8141  DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8142  dev->name);
8143  break;
8144  case TX_PRIORITY_STEERING:
8145  DBG_PRINT(ERR_DBG,
8146  "%s: Priority steering enabled for transmit\n",
8147  dev->name);
8148  break;
8149  case TX_DEFAULT_STEERING:
8150  DBG_PRINT(ERR_DBG,
8151  "%s: Default steering enabled for transmit\n",
8152  dev->name);
8153  }
8154 
8155  DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8156  dev->name);
8157  if (ufo)
8158  DBG_PRINT(ERR_DBG,
8159  "%s: UDP Fragmentation Offload(UFO) enabled\n",
8160  dev->name);
8161  /* Initialize device name */
8162  sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8163 
8164  if (vlan_tag_strip)
8165  sp->vlan_strip_flag = 1;
8166  else
8167  sp->vlan_strip_flag = 0;
8168 
8169  /*
8170  * Make the link state off at this point; when the link change
8171  * interrupt comes the state will be automatically changed to
8172  * the right state.
8173  */
8174  netif_carrier_off(dev);
8175 
8176  return 0;
8177 
8178 register_failed:
8179 set_swap_failed:
8180  iounmap(sp->bar1);
8181 bar1_remap_failed:
8182  iounmap(sp->bar0);
8183 bar0_remap_failed:
8184 mem_alloc_failed:
8185  free_shared_mem(sp);
8186  pci_disable_device(pdev);
8187  pci_release_regions(pdev);
8188  pci_set_drvdata(pdev, NULL);
8189  free_netdev(dev);
8190 
8191  return ret;
8192 }
8193 
8203 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8204 {
8205  struct net_device *dev = pci_get_drvdata(pdev);
8206  struct s2io_nic *sp;
8207 
8208  if (dev == NULL) {
8209  DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8210  return;
8211  }
8212 
8213  sp = netdev_priv(dev);
8214 
8215  cancel_work_sync(&sp->rst_timer_task);
8216  cancel_work_sync(&sp->set_link_task);
8217 
8218  unregister_netdev(dev);
8219 
8220  free_shared_mem(sp);
8221  iounmap(sp->bar0);
8222  iounmap(sp->bar1);
8223  pci_release_regions(pdev);
8224  pci_set_drvdata(pdev, NULL);
8225  free_netdev(dev);
8226  pci_disable_device(pdev);
8227 }
8228 
8235 static int __init s2io_starter(void)
8236 {
8237  return pci_register_driver(&s2io_driver);
8238 }
8239 
8245 static __exit void s2io_closer(void)
8246 {
8247  pci_unregister_driver(&s2io_driver);
8248  DBG_PRINT(INIT_DBG, "cleanup done\n");
8249 }
8250 
8251 module_init(s2io_starter);
8252 module_exit(s2io_closer);
8253 
8254 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8255  struct tcphdr **tcp, struct RxD_t *rxdp,
8256  struct s2io_nic *sp)
8257 {
8258  int ip_off;
8259  u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8260 
8261  if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8262  DBG_PRINT(INIT_DBG,
8263  "%s: Non-TCP frames not supported for LRO\n",
8264  __func__);
8265  return -1;
8266  }
8267 
8268  /* Checking for DIX type or DIX type with VLAN */
8269  if ((l2_type == 0) || (l2_type == 4)) {
8270  ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8271  /*
8272  * If vlan stripping is disabled and the frame is VLAN tagged,
8273  * shift the offset by the VLAN header size bytes.
8274  */
8275  if ((!sp->vlan_strip_flag) &&
8276  (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8277  ip_off += HEADER_VLAN_SIZE;
8278  } else {
8279  /* LLC, SNAP etc are considered non-mergeable */
8280  return -1;
8281  }
8282 
8283  *ip = (struct iphdr *)(buffer + ip_off);
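/* ihl counts 32-bit words; shifting left by 2 yields the IP header
 * length in bytes, and the TCP header starts right after it. */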
8284  ip_len = (u8)((*ip)->ihl);
8285  ip_len <<= 2;
8286  *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8287 
8288  return 0;
8289 }
8290 
8291 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8292  struct tcphdr *tcp)
8293 {
8294  DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8295  if ((lro->iph->saddr != ip->saddr) ||
8296  (lro->iph->daddr != ip->daddr) ||
8297  (lro->tcph->source != tcp->source) ||
8298  (lro->tcph->dest != tcp->dest))
8299  return -1;
8300  return 0;
8301 }
8302 
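/*
 * TCP payload length = IP total length - IP header - TCP header, where
 * ihl and doff count 32-bit words (hence the << 2). For example, a frame
 * with tot_len 1500 and no IP or TCP options (ihl = 5, doff = 5) carries
 * 1500 - 20 - 20 = 1460 payload bytes.
 */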
8303 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8304 {
8305  return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8306 }
8307 
8308 static void initiate_new_session(struct lro *lro, u8 *l2h,
8309  struct iphdr *ip, struct tcphdr *tcp,
8310  u32 tcp_pyld_len, u16 vlan_tag)
8311 {
8312  DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8313  lro->l2h = l2h;
8314  lro->iph = ip;
8315  lro->tcph = tcp;
8316  lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8317  lro->tcp_ack = tcp->ack_seq;
8318  lro->sg_num = 1;
8319  lro->total_len = ntohs(ip->tot_len);
8320  lro->frags_len = 0;
8321  lro->vlan_tag = vlan_tag;
8322  /*
8323  * Check if we saw TCP timestamp.
8324  * Other consistency checks have already been done.
8325  */
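 /*
  * doff == 8 means a 32-byte TCP header: 20 fixed bytes plus 12 option
  * bytes. verify_l3_l4_lro_capable() has already accepted only a single
  * timestamp option (kind 8, length 10) - in practice the canonical
  * NOP, NOP, timestamp layout, which the fixed offsets below rely on:
  * ptr + 1 is TSval and ptr + 2 is TSecr.
  */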
8326  if (tcp->doff == 8) {
8327  __be32 *ptr;
8328  ptr = (__be32 *)(tcp+1);
8329  lro->saw_ts = 1;
8330  lro->cur_tsval = ntohl(*(ptr+1));
8331  lro->cur_tsecr = *(ptr+2);
8332  }
8333  lro->in_use = 1;
8334 }
8335 
8336 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8337 {
8338  struct iphdr *ip = lro->iph;
8339  struct tcphdr *tcp = lro->tcph;
8340  __sum16 nchk;
8341  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8342 
8343  DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8344 
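 /*
  * ip_fast_csum() computes a fresh checksum over the IP header, so
  * ip->check must be zeroed first or the stale checksum would be
  * folded into the new value.
  */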
8345  /* Update L3 header */
8346  ip->tot_len = htons(lro->total_len);
8347  ip->check = 0;
8348  nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8349  ip->check = nchk;
8350 
8351  /* Update L4 header */
8352  tcp->ack_seq = lro->tcp_ack;
8353  tcp->window = lro->window;
8354 
8355  /* Update tsecr field if this session has timestamps enabled */
8356  if (lro->saw_ts) {
8357  __be32 *ptr = (__be32 *)(tcp + 1);
8358  *(ptr+2) = lro->cur_tsecr;
8359  }
8360 
8361  /* Update counters required for calculation of
8362  * average no. of packets aggregated.
8363  */
8364  swstats->sum_avg_pkts_aggregated += lro->sg_num;
8365  swstats->num_aggregations++;
8366 }
8367 
8368 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8369  struct tcphdr *tcp, u32 l4_pyld)
8370 {
8371  DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8372  lro->total_len += l4_pyld;
8373  lro->frags_len += l4_pyld;
8374  lro->tcp_next_seq += l4_pyld;
8375  lro->sg_num++;
8376 
8377  /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8378  lro->tcp_ack = tcp->ack_seq;
8379  lro->window = tcp->window;
8380 
8381  if (lro->saw_ts) {
8382  __be32 *ptr;
8383  /* Update tsecr and tsval from this packet */
8384  ptr = (__be32 *)(tcp+1);
8385  lro->cur_tsval = ntohl(*(ptr+1));
8386  lro->cur_tsecr = *(ptr + 2);
8387  }
8388 }
8389 
8390 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8391  struct tcphdr *tcp, u32 tcp_pyld_len)
8392 {
8393  u8 *ptr;
8394 
8395  DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8396 
8397  if (!tcp_pyld_len) {
8398  /* Runt frame or a pure ack */
8399  return -1;
8400  }
8401 
8402  if (ip->ihl != 5) /* IP has options */
8403  return -1;
8404 
8405  /* If we see CE codepoint in IP header, packet is not mergeable */
8406  if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8407  return -1;
8408 
8409  /* Any TCP control flag other than ACK makes the packet non-mergeable */
8410  if (tcp->urg || tcp->psh || tcp->rst ||
8411  tcp->syn || tcp->fin ||
8412  tcp->ece || tcp->cwr || !tcp->ack) {
8413  /*
8414  * Currently only the ACK control bit is recognized; any
8415  * other control flag being set results in flushing the
8416  * LRO session.
8417  */
8418  return -1;
8419  }
8420 
8421  /*
8422  * Allow only one TCP timestamp option. Don't aggregate if
8423  * any other options are detected.
8424  */
8425  if (tcp->doff != 5 && tcp->doff != 8)
8426  return -1;
8427 
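 /*
  * With doff == 8 there are exactly 12 option bytes: skip any leading
  * NOP padding, then require a timestamp option (TCPOPT_TIMESTAMP is
  * kind 8, TCPOLEN_TIMESTAMP is 10 bytes); any other option defeats
  * aggregation.
  */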
8428  if (tcp->doff == 8) {
8429  ptr = (u8 *)(tcp + 1);
8430  while (*ptr == TCPOPT_NOP)
8431  ptr++;
8432  if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8433  return -1;
8434 
8435  /* Ensure timestamp value increases monotonically */
8436  if (l_lro)
8437  if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8438  return -1;
8439 
8440  /* timestamp echo reply should be non-zero */
8441  if (*((__be32 *)(ptr+6)) == 0)
8442  return -1;
8443  }
8444 
8445  return 0;
8446 }
8447 
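/*
 * Decide what the Rx path should do with a received TCP segment. Going
 * by the code paths below, the return value means:
 *   0 - all LRO sessions in use; pass the packet up unaggregated
 *   1 - packet aggregated into an existing session
 *   2 - out-of-sequence or unmergeable packet; flush the matched session
 *   3 - a new LRO session was initiated
 *   4 - session reached lro_max_aggr_per_sess; aggregated, then flush
 *   5 - packet not L3/L4 aggregatable; sent up without touching a session
 * The caller is expected to dispatch on this value, appending via
 * lro_append_pkt() for 1/4 and queueing flushed frames with
 * queue_rx_frame().
 */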
8448 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8449  u8 **tcp, u32 *tcp_len, struct lro **lro,
8450  struct RxD_t *rxdp, struct s2io_nic *sp)
8451 {
8452  struct iphdr *ip;
8453  struct tcphdr *tcph;
8454  int ret = 0, i;
8455  u16 vlan_tag = 0;
8456  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8457 
8458  ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8459  rxdp, sp);
8460  if (ret)
8461  return ret;
8462 
8463  DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8464 
8465  vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8466  tcph = (struct tcphdr *)*tcp;
8467  *tcp_len = get_l4_pyld_length(ip, tcph);
8468  for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8469  struct lro *l_lro = &ring_data->lro0_n[i];
8470  if (l_lro->in_use) {
8471  if (check_for_socket_match(l_lro, ip, tcph))
8472  continue;
8473  /* Sock pair matched */
8474  *lro = l_lro;
8475 
8476  if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8477  DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8478  "expected 0x%x, actual 0x%x\n",
8479  __func__,
8480  (*lro)->tcp_next_seq,
8481  ntohl(tcph->seq));
8482 
8483  swstats->outof_sequence_pkts++;
8484  ret = 2;
8485  break;
8486  }
8487 
8488  if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8489  *tcp_len))
8490  ret = 1; /* Aggregate */
8491  else
8492  ret = 2; /* Flush both */
8493  break;
8494  }
8495  }
8496 
8497  if (ret == 0) {
8498  /* Before searching for available LRO objects,
8499  * check if the pkt is L3/L4 aggregatable. If not,
8500  * don't create a new LRO session; just send this
8501  * packet up.
8502  */
8503  if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8504  return 5;
8505 
8506  for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8507  struct lro *l_lro = &ring_data->lro0_n[i];
8508  if (!(l_lro->in_use)) {
8509  *lro = l_lro;
8510  ret = 3; /* Begin anew */
8511  break;
8512  }
8513  }
8514  }
8515 
8516  if (ret == 0) { /* sessions exceeded */
8517  DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8518  __func__);
8519  *lro = NULL;
8520  return ret;
8521  }
8522 
8523  switch (ret) {
8524  case 3:
8525  initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8526  vlan_tag);
8527  break;
8528  case 2:
8529  update_L3L4_header(sp, *lro);
8530  break;
8531  case 1:
8532  aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8533  if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8534  update_L3L4_header(sp, *lro);
8535  ret = 4; /* Flush the LRO */
8536  }
8537  break;
8538  default:
8539  DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8540  break;
8541  }
8542 
8543  return ret;
8544 }
8545 
8546 static void clear_lro_session(struct lro *lro)
8547 {
8548  static u16 lro_struct_size = sizeof(struct lro);
8549 
8550  memset(lro, 0, lro_struct_size);
8551 }
8552 
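/*
 * Hand a completed frame to the stack: re-attach the VLAN tag if the
 * hardware stripped it, then use netif_receive_skb() on the NAPI path
 * or netif_rx() otherwise.
 */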
8553 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8554 {
8555  struct net_device *dev = skb->dev;
8556  struct s2io_nic *sp = netdev_priv(dev);
8557 
8558  skb->protocol = eth_type_trans(skb, dev);
8559  if (vlan_tag && sp->vlan_strip_flag)
8560  __vlan_hwaccel_put_tag(skb, vlan_tag);
8561  if (sp->config.napi)
8562  netif_receive_skb(skb);
8563  else
8564  netif_rx(skb);
8565 }
8566 
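/*
 * Append an aggregated segment to the session's parent skb via the
 * frag_list chain: skb_pull() discards everything but the tcp_len
 * payload bytes of the new skb, and the parent's len, data_len and
 * truesize are grown to match.
 */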
8567 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8568  struct sk_buff *skb, u32 tcp_len)
8569 {
8570  struct sk_buff *first = lro->parent;
8571  struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8572 
8573  first->len += tcp_len;
8574  first->data_len = lro->frags_len;
8575  skb_pull(skb, (skb->len - tcp_len));
8576  if (skb_shinfo(first)->frag_list)
8577  lro->last_frag->next = skb;
8578  else
8579  skb_shinfo(first)->frag_list = skb;
8580  first->truesize += skb->truesize;
8581  lro->last_frag = skb;
8582  swstats->clubbed_frms_cnt++;
8583 }
8584 
8585 /**
8586  * s2io_io_error_detected - called when PCI error is detected
8587  * @pdev: Pointer to PCI device
8588  * @state: The current PCI connection state
8589  *
8590  * This function is called after a PCI bus error affecting
8591  * this device has been detected.
8592  */
8593 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8594  pci_channel_state_t state)
8595 {
8596  struct net_device *netdev = pci_get_drvdata(pdev);
8597  struct s2io_nic *sp = netdev_priv(netdev);
8598 
8599  netif_device_detach(netdev);
8600 
8601  if (state == pci_channel_io_perm_failure)
8602  return PCI_ERS_RESULT_DISCONNECT;
8603 
8604  if (netif_running(netdev)) {
8605  /* Bring down the card, while avoiding PCI I/O */
8606  do_s2io_card_down(sp, 0);
8607  }
8608  pci_disable_device(pdev);
8609 
8610  return PCI_ERS_RESULT_NEED_RESET;
8611 }
8612 
8613 /**
8614  * s2io_io_slot_reset - called after the PCI bus has been reset
8615  * @pdev: Pointer to PCI device
8616  *
8617  * Restart the card from scratch, as if from a cold boot.
8618  * At this point, the card has experienced a hard reset,
8619  * followed by fixups by BIOS, and has its config space
8620  * set up identically to what it was at cold boot.
8621  */
8622 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8623 {
8624  struct net_device *netdev = pci_get_drvdata(pdev);
8625  struct s2io_nic *sp = netdev_priv(netdev);
8626 
8627  if (pci_enable_device(pdev)) {
8628  pr_err("Cannot re-enable PCI device after reset.\n");
8629  return PCI_ERS_RESULT_DISCONNECT;
8630  }
8631 
8632  pci_set_master(pdev);
8633  s2io_reset(sp);
8634 
8635  return PCI_ERS_RESULT_RECOVERED;
8636 }
8637 
8638 /**
8639  * s2io_io_resume - called when traffic can start flowing again
8640  * @pdev: Pointer to PCI device
8641  *
8642  * This callback is called when the error recovery driver tells
8643  * us that it's OK to resume normal operation.
8644  */
8645 static void s2io_io_resume(struct pci_dev *pdev)
8646 {
8647  struct net_device *netdev = pci_get_drvdata(pdev);
8648  struct s2io_nic *sp = netdev_priv(netdev);
8649 
8650  if (netif_running(netdev)) {
8651  if (s2io_card_up(sp)) {
8652  pr_err("Can't bring device back up after reset.\n");
8653  return;
8654  }
8655 
8656  if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8657  s2io_card_down(sp);
8658  pr_err("Can't restore mac addr after reset.\n");
8659  return;
8660  }
8661  }
8662 
8663  netif_device_attach(netdev);
8664  netif_tx_wake_all_queues(netdev);
8665 }
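/*
 * Presumably these three callbacks are wired into a struct
 * pci_error_handlers and referenced from s2io_driver's err_handler
 * field elsewhere in this file, which is how the PCI core finds them
 * during error recovery.
 */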