55 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
61 #include <linux/pci.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
66 #include <linux/mdio.h>
70 #include <linux/stddef.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
75 #include <linux/if_vlan.h>
77 #include <linux/tcp.h>
80 #include <linux/slab.h>
81 #include <linux/prefetch.h>
84 #include <asm/div64.h>
/* Driver identity strings (name and version) -- presumably surfaced via
 * ethtool get_drvinfo; confirm against s2io_ethtool_gdrvinfo. */
#define DRV_VERSION "2.0.26.28"
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;
/* Per-mode RxD geometry, indexed by nic->rxd_mode: descriptor size in
 * bytes and number of RxDs per block.  Index 0 appears to correspond to
 * the single-buffer RxD1 layout and index 1 to the multi-buffer RxD3
 * layout (see the rxdp1/rxdp3 casts in the Rx paths) -- TODO confirm. */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
100 static inline int RXD_IS_UP2DT(
struct RxD_t *
rxdp)
/*
 * Evaluates to 1 for Xframe-I adapters whose subsystem id falls in the
 * ranges below (cards with faulty link LED indicators), 0 otherwise.
 * Arguments and the whole expansion are parenthesized so the macro is
 * safe inside larger expressions and with compound arguments.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* True when neither the remote-fault nor local-fault bit is set in the
 * adapter status register value.  Argument parenthesized for macro
 * hygiene (safe with compound expressions). */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
123 static inline int is_s2io_card_up(
const struct s2io_nic *
sp)
130 "Register test\t(offline)",
131 "Eeprom test\t(offline)",
132 "Link test\t(online)",
133 "RLDRAM test\t(offline)",
134 "BIST Test\t(offline)"
139 {
"tmac_data_octets"},
143 {
"tmac_pause_ctrl_frms"},
147 {
"tmac_any_err_frms"},
148 {
"tmac_ttl_less_fb_octets"},
149 {
"tmac_vld_ip_octets"},
157 {
"rmac_data_octets"},
158 {
"rmac_fcs_err_frms"},
160 {
"rmac_vld_mcst_frms"},
161 {
"rmac_vld_bcst_frms"},
162 {
"rmac_in_rng_len_err_frms"},
163 {
"rmac_out_rng_len_err_frms"},
165 {
"rmac_pause_ctrl_frms"},
166 {
"rmac_unsup_ctrl_frms"},
168 {
"rmac_accepted_ucst_frms"},
169 {
"rmac_accepted_nucst_frms"},
170 {
"rmac_discarded_frms"},
171 {
"rmac_drop_events"},
172 {
"rmac_ttl_less_fb_octets"},
174 {
"rmac_usized_frms"},
175 {
"rmac_osized_frms"},
177 {
"rmac_jabber_frms"},
178 {
"rmac_ttl_64_frms"},
179 {
"rmac_ttl_65_127_frms"},
180 {
"rmac_ttl_128_255_frms"},
181 {
"rmac_ttl_256_511_frms"},
182 {
"rmac_ttl_512_1023_frms"},
183 {
"rmac_ttl_1024_1518_frms"},
191 {
"rmac_err_drp_udp"},
192 {
"rmac_xgmii_err_sym"},
210 {
"rmac_xgmii_data_err_cnt"},
211 {
"rmac_xgmii_ctrl_err_cnt"},
212 {
"rmac_accepted_ip"},
216 {
"new_rd_req_rtry_cnt"},
218 {
"wr_rtry_rd_ack_cnt"},
221 {
"new_wr_req_rtry_cnt"},
224 {
"rd_rtry_wr_ack_cnt"},
235 {
"rmac_ttl_1519_4095_frms"},
236 {
"rmac_ttl_4096_8191_frms"},
237 {
"rmac_ttl_8192_max_frms"},
238 {
"rmac_ttl_gt_max_frms"},
239 {
"rmac_osized_alt_frms"},
240 {
"rmac_jabber_alt_frms"},
241 {
"rmac_gt_max_alt_frms"},
243 {
"rmac_len_discard"},
244 {
"rmac_fcs_discard"},
247 {
"rmac_red_discard"},
248 {
"rmac_rts_discard"},
249 {
"rmac_ingm_full_discard"},
254 {
"\n DRIVER STATISTICS"},
255 {
"single_bit_ecc_errs"},
256 {
"double_bit_ecc_errs"},
269 {
"alarm_transceiver_temp_high"},
270 {
"alarm_transceiver_temp_low"},
271 {
"alarm_laser_bias_current_high"},
272 {
"alarm_laser_bias_current_low"},
273 {
"alarm_laser_output_power_high"},
274 {
"alarm_laser_output_power_low"},
275 {
"warn_transceiver_temp_high"},
276 {
"warn_transceiver_temp_low"},
277 {
"warn_laser_bias_current_high"},
278 {
"warn_laser_bias_current_low"},
279 {
"warn_laser_output_power_high"},
280 {
"warn_laser_output_power_low"},
281 {
"lro_aggregated_pkts"},
282 {
"lro_flush_both_count"},
283 {
"lro_out_of_sequence_pkts"},
284 {
"lro_flush_due_to_max_pkts"},
285 {
"lro_avg_aggr_pkts"},
286 {
"mem_alloc_fail_cnt"},
287 {
"pci_map_fail_cnt"},
288 {
"watchdog_timer_cnt"},
295 {
"tx_tcode_buf_abort_cnt"},
296 {
"tx_tcode_desc_abort_cnt"},
297 {
"tx_tcode_parity_err_cnt"},
298 {
"tx_tcode_link_loss_cnt"},
299 {
"tx_tcode_list_proc_err_cnt"},
300 {
"rx_tcode_parity_err_cnt"},
301 {
"rx_tcode_abort_cnt"},
302 {
"rx_tcode_parity_abort_cnt"},
303 {
"rx_tcode_rda_fail_cnt"},
304 {
"rx_tcode_unkn_prot_cnt"},
305 {
"rx_tcode_fcs_err_cnt"},
306 {
"rx_tcode_buf_size_err_cnt"},
307 {
"rx_tcode_rxd_corrupt_cnt"},
308 {
"rx_tcode_unkn_err_cnt"},
316 {
"mac_tmac_err_cnt"},
317 {
"mac_rmac_err_cnt"},
318 {
"xgxs_txgxs_err_cnt"},
319 {
"xgxs_rxgxs_err_cnt"},
321 {
"prc_pcix_err_cnt"},
/* Entry counts of the ethtool statistics key tables defined above. */
#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
/* Xframe-I reports Xena + driver stats; Xframe-II adds the enhanced set. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
/* Byte sizes of the corresponding ethtool string tables. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
/* Self-test name table size ("Register test", "Eeprom test", ...). */
#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Initialize and arm @timer to fire (jiffies + @exp), calling @handle
 * with @arg.  Wrapped in do { } while (0) so this multi-statement macro
 * expands safely after an un-braced if/else -- without the wrapper only
 * the first statement would be conditional and a following "else" would
 * fail to compile.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
364 static const u64 herc_act_dtx_cfg[] = {
366 0x8000051536750000ULL, 0x80000515367500E0ULL,
368 0x8000051536750004ULL, 0x80000515367500E4ULL,
370 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
372 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
374 0x801205150D440000ULL, 0x801205150D4400E0ULL,
376 0x801205150D440004ULL, 0x801205150D4400E4ULL,
378 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
380 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
385 static const u64 xena_dtx_cfg[] = {
387 0x8000051500000000ULL, 0x80000515000000E0ULL,
389 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
391 0x8001051500000000ULL, 0x80010515000000E0ULL,
393 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
395 0x8002051500000000ULL, 0x80020515000000E0ULL,
397 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
405 static const u64 fix_mac[] = {
406 0x0060000000000000ULL, 0x0060600000000000ULL,
407 0x0040600000000000ULL, 0x0000600000000000ULL,
408 0x0020600000000000ULL, 0x0060600000000000ULL,
409 0x0020600000000000ULL, 0x0060600000000000ULL,
410 0x0020600000000000ULL, 0x0060600000000000ULL,
411 0x0020600000000000ULL, 0x0060600000000000ULL,
412 0x0020600000000000ULL, 0x0060600000000000ULL,
413 0x0020600000000000ULL, 0x0060600000000000ULL,
414 0x0020600000000000ULL, 0x0060600000000000ULL,
415 0x0020600000000000ULL, 0x0060600000000000ULL,
416 0x0020600000000000ULL, 0x0060600000000000ULL,
417 0x0020600000000000ULL, 0x0060600000000000ULL,
418 0x0020600000000000ULL, 0x0000600000000000ULL,
419 0x0040600000000000ULL, 0x0060600000000000ULL,
463 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
488 .error_detected = s2io_io_error_detected,
489 .slot_reset = s2io_io_slot_reset,
490 .resume = s2io_io_resume,
495 .id_table = s2io_tbl,
496 .probe = s2io_init_nic,
498 .err_handler = &s2io_err_handler,
502 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
505 static inline void s2io_stop_all_tx_queue(
struct s2io_nic *
sp)
510 for (i = 0; i < sp->
config.tx_fifo_num; i++)
513 netif_tx_stop_all_queues(sp->
dev);
516 static inline void s2io_stop_tx_queue(
struct s2io_nic *
sp,
int fifo_no)
522 netif_tx_stop_all_queues(sp->
dev);
525 static inline void s2io_start_all_tx_queue(
struct s2io_nic *sp)
530 for (i = 0; i < sp->
config.tx_fifo_num; i++)
533 netif_tx_start_all_queues(sp->
dev);
536 static inline void s2io_start_tx_queue(
struct s2io_nic *sp,
int fifo_no)
542 netif_tx_start_all_queues(sp->
dev);
545 static inline void s2io_wake_all_tx_queue(
struct s2io_nic *sp)
550 for (i = 0; i < sp->
config.tx_fifo_num; i++)
553 netif_tx_wake_all_queues(sp->
dev);
556 static inline void s2io_wake_tx_queue(
561 if (cnt && __netif_subqueue_stopped(fifo->
dev, fifo->
fifo_no))
562 netif_wake_subqueue(fifo->
dev, fifo->
fifo_no);
564 if (netif_queue_stopped(fifo->
dev)) {
566 netif_wake_queue(fifo->
dev);
579 static int init_shared_mem(
struct s2io_nic *nic)
582 void *tmp_v_addr, *tmp_v_addr_next;
586 int lst_size, lst_per_page;
592 unsigned long long mem_allocated = 0;
603 "Too many TxDs requested: %d, max supported: %d\n",
618 "Valid lengths are 2 through 8192\n",
624 lst_size = (
sizeof(
struct TxD) * config->
max_txds);
638 mem_allocated += list_holder_size;
655 for (j = 0; j < page_num; j++) {
663 "pci_alloc_consistent failed for TxDL\n");
674 "%s: Zero DMA address for TxDL. "
675 "Virtual address %p\n",
681 "pci_alloc_consistent failed for TxDL\n");
686 while (k < lst_per_page) {
687 int l = (j * lst_per_page) + k;
691 tmp_v + (k * lst_size);
693 tmp_p + (k * lst_size);
707 mem_allocated += (size *
sizeof(
u64));
718 "multiple of RxDs per Block\n",
728 size = (size * (
sizeof(
struct RxD1)));
730 size = (size * (
sizeof(
struct RxD3)));
747 for (j = 0; j <
blk_cnt; j++) {
755 if (tmp_v_addr ==
NULL) {
765 mem_allocated +=
size;
766 memset(tmp_v_addr, 0, size);
773 if (!rx_blocks->
rxds)
775 mem_allocated +=
size;
776 for (l = 0; l < rxd_count[nic->
rxd_mode]; l++) {
777 rx_blocks->
rxds[
l].virt_addr =
780 rx_blocks->
rxds[
l].dma_addr =
786 for (j = 0; j <
blk_cnt; j++) {
787 int next = (j + 1) % blk_cnt;
788 tmp_v_addr = ring->
rx_blocks[
j].block_virt_addr;
790 tmp_p_addr = ring->
rx_blocks[
j].block_dma_addr;
793 pre_rxd_blk = tmp_v_addr;
795 (
unsigned long)tmp_v_addr_next;
797 (
u64)tmp_p_addr_next;
811 size =
sizeof(
struct buffAdd *) * blk_cnt;
815 mem_allocated +=
size;
816 for (j = 0; j <
blk_cnt; j++) {
819 size =
sizeof(
struct buffAdd) *
824 mem_allocated +=
size;
825 while (k != rxd_count[nic->
rxd_mode]) {
826 ba = &ring->
ba[
j][
k];
831 mem_allocated +=
size;
835 ba->
ba_0 = (
void *)tmp;
841 mem_allocated +=
size;
845 ba->
ba_1 = (
void *)tmp;
866 mem_allocated +=
size;
871 memset(tmp_v_addr, 0, size);
873 dev_name(&nic->
pdev->dev), (
unsigned long long)tmp_p_addr);
874 mac_control->
stats_info->sw_stat.mem_allocated += mem_allocated;
885 static void free_shared_mem(
struct s2io_nic *nic)
890 int lst_size, lst_per_page;
916 for (j = 0; j < page_num; j++) {
917 int mem_blks = (j * lst_per_page);
939 "%s: Freeing TxDL with zero DMA address. "
940 "Virtual address %p\n",
954 for (j = 0; j <
blk_cnt; j++) {
955 tmp_v_addr = ring->
rx_blocks[
j].block_virt_addr;
956 tmp_p_addr = ring->
rx_blocks[
j].block_dma_addr;
957 if (tmp_v_addr ==
NULL)
960 tmp_v_addr, tmp_p_addr);
976 for (j = 0; j <
blk_cnt; j++) {
980 while (k != rxd_count[nic->
rxd_mode]) {
1000 for (i = 0; i < nic->
config.tx_fifo_num; i++) {
1024 static int s2io_verify_pci_mode(
struct s2io_nic *nic)
1027 register u64 val64 = 0;
/* PCI vendor/device id of an NEC bridge; s2io_on_nec_bridge() below
 * walks the parent bus looking for this device. */
#define NEC_VENID 0x1033
#define NEC_DEVID 0x0125
1040 static int s2io_on_nec_bridge(
struct pci_dev *s2io_pdev)
1045 if (tdev->
bus == s2io_pdev->
bus->parent) {
1054 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1058 static int s2io_print_pci_mode(
struct s2io_nic *nic)
1061 register u64 val64 = 0;
1064 const char *pcimode;
1074 if (s2io_on_nec_bridge(nic->
pdev)) {
1082 pcimode =
"33MHz PCI bus";
1085 pcimode =
"66MHz PCI bus";
1088 pcimode =
"66MHz PCIX(M1) bus";
1091 pcimode =
"100MHz PCIX(M1) bus";
1094 pcimode =
"133MHz PCIX(M1) bus";
1097 pcimode =
"133MHz PCIX(M2) bus";
1100 pcimode =
"200MHz PCIX(M2) bus";
1103 pcimode =
"266MHz PCIX(M2) bus";
1106 pcimode =
"unsupported bus!";
1129 register u64 val64 = 0;
1150 if (use_continuous_tx_intrs && (link ==
LINK_UP))
1160 if ((nic->
config.tx_steering_type ==
1202 static int init_nic(
struct s2io_nic *nic)
1206 register u64 val64 = 0;
1211 unsigned long long mem_share;
1217 if (s2io_set_swapper(nic)) {
1226 val64 = 0xA500000000ULL;
1242 for (i = 0; i < 50; i++) {
1271 while (herc_act_dtx_cfg[dtx_cnt] !=
END_SIGN) {
1272 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1279 while (xena_dtx_cfg[dtx_cnt] !=
END_SIGN) {
1280 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1294 for (i = 0, j = 0; i < config->
tx_fifo_num; i++) {
1297 val64 |=
vBIT(tx_cfg->
fifo_len - 1, ((j * 32) + 19), 13) |
1426 val64 = 0x0001000100010001ULL;
1431 val64 = 0x0001000100000000ULL;
1435 val64 = 0x0001020001020001ULL;
1437 val64 = 0x0200010200010200ULL;
1439 val64 = 0x0102000102000102ULL;
1441 val64 = 0x0001020001020001ULL;
1443 val64 = 0x0200010200000000ULL;
1447 val64 = 0x0001020300010203ULL;
1452 val64 = 0x0001020300000000ULL;
1456 val64 = 0x0001020304000102ULL;
1458 val64 = 0x0304000102030400ULL;
1460 val64 = 0x0102030400010203ULL;
1462 val64 = 0x0400010203040001ULL;
1464 val64 = 0x0203040000000000ULL;
1468 val64 = 0x0001020304050001ULL;
1470 val64 = 0x0203040500010203ULL;
1472 val64 = 0x0405000102030405ULL;
1474 val64 = 0x0001020304050001ULL;
1476 val64 = 0x0203040500000000ULL;
1480 val64 = 0x0001020304050600ULL;
1482 val64 = 0x0102030405060001ULL;
1484 val64 = 0x0203040506000102ULL;
1486 val64 = 0x0304050600010203ULL;
1488 val64 = 0x0405060000000000ULL;
1492 val64 = 0x0001020304050607ULL;
1497 val64 = 0x0001020300000000ULL;
1520 val64 = 0x8080808080808080ULL;
1524 val64 = 0x0001000100010001ULL;
1529 val64 = 0x0001000100000000ULL;
1532 val64 = 0x8080808040404040ULL;
1536 val64 = 0x0001020001020001ULL;
1538 val64 = 0x0200010200010200ULL;
1540 val64 = 0x0102000102000102ULL;
1542 val64 = 0x0001020001020001ULL;
1544 val64 = 0x0200010200000000ULL;
1547 val64 = 0x8080804040402020ULL;
1551 val64 = 0x0001020300010203ULL;
1556 val64 = 0x0001020300000000ULL;
1559 val64 = 0x8080404020201010ULL;
1563 val64 = 0x0001020304000102ULL;
1565 val64 = 0x0304000102030400ULL;
1567 val64 = 0x0102030400010203ULL;
1569 val64 = 0x0400010203040001ULL;
1571 val64 = 0x0203040000000000ULL;
1574 val64 = 0x8080404020201008ULL;
1578 val64 = 0x0001020304050001ULL;
1580 val64 = 0x0203040500010203ULL;
1582 val64 = 0x0405000102030405ULL;
1584 val64 = 0x0001020304050001ULL;
1586 val64 = 0x0203040500000000ULL;
1589 val64 = 0x8080404020100804ULL;
1593 val64 = 0x0001020304050600ULL;
1595 val64 = 0x0102030405060001ULL;
1597 val64 = 0x0203040506000102ULL;
1599 val64 = 0x0304050600010203ULL;
1601 val64 = 0x0405060000000000ULL;
1604 val64 = 0x8080402010080402ULL;
1608 val64 = 0x0001020304050607ULL;
1613 val64 = 0x0001020300000000ULL;
1616 val64 = 0x8040201008040201ULL;
1623 for (i = 0; i < 8; i++)
1641 if (rts_frm_len[i] != 0) {
1648 for (i = 0; i < 64; i++) {
1649 if (rts_ds_steer(nic, i, 0) ==
FAILURE) {
1651 "%s: rts_ds_steer failed on codepoint %d\n",
1784 for (i = 0; i < 4; i++) {
1785 val64 |= (((
u64)0xFF00 |
1792 for (i = 0; i < 4; i++) {
1793 val64 |= (((
u64)0xFF00 |
1807 if (nic->
config.bus_speed == 266) {
/* Link-fault indication methods -- compared against the return value of
 * s2io_link_fault_indication() (see en_dis_able_nic_intrs). */
#define LINK_UP_DOWN_INTERRUPT 1
#define MAC_RMAC_ERR_TIMER 2
1835 static int s2io_link_fault_indication(
struct s2io_nic *nic)
1856 temp64 =
readq(addr);
1859 temp64 &= ~((
u64)value);
1861 temp64 |= ((
u64)value);
1865 static void en_dis_err_alarms(
struct s2io_nic *nic,
u16 mask,
int flag)
1868 register u64 gen_int_mask = 0;
1966 do_s2io_write_bits(interruptible,
2003 static void en_dis_able_nic_intrs(
struct s2io_nic *nic,
u16 mask,
int flag)
2022 if (s2io_link_fault_indication(nic) ==
2087 static int verify_pcc_quiescent(
struct s2io_nic *sp,
int flag)
2095 if (flag ==
false) {
2096 if ((!herc && (sp->
pdev->revision >= 4)) || herc) {
2104 if ((!herc && (sp->
pdev->revision >= 4)) || herc) {
2106 ADAPTER_STATUS_RMAC_PCC_IDLE))
2110 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2127 static int verify_xena_quiescence(
struct s2io_nic *sp)
2132 mode = s2io_verify_pci_mode(sp);
2179 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2194 static void fix_mac_address(
struct s2io_nic *sp)
2219 static int start_nic(
struct s2io_nic *nic)
2223 register u64 val64 = 0;
2254 if (vlan_tag_strip == 0) {
2283 if (!verify_xena_quiescence(nic)) {
2285 "Adapter status reads: 0x%llx\n",
2286 dev->
name, (
unsigned long long)val64);
2311 subid = nic->
pdev->subsystem_device;
2312 if (((subid & 0xFF) >= 0x07) &&
2315 val64 |= 0x0000800000000000ULL;
2317 val64 = 0x0411040400000000ULL;
2327 struct TxD *txdlp,
int get_off)
2348 frg_cnt = skb_shinfo(skb)->nr_frags;
2351 for (j = 0; j < frg_cnt; j++, txds++) {
2355 pci_unmap_page(nic->
pdev,
2372 static void free_tx_buffers(
struct s2io_nic *nic)
2387 unsigned long flags;
2390 for (j = 0; j < tx_cfg->
fifo_len; j++) {
2392 skb = s2io_txdl_getskb(&mac_control->
fifos[i], txdp, j);
2400 "%s: forcibly freeing %d skbs on FIFO%d\n",
2404 spin_unlock_irqrestore(&fifo->
tx_lock, flags);
2418 static void stop_nic(
struct s2io_nic *nic)
2421 register u64 val64 = 0;
2463 int off,
size, block_no, block_no1;
2469 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2473 struct swStat *swstats = &ring->
nic->mac_control.stats_info->sw_stat;
2478 while (alloc_tab < alloc_cnt) {
2483 rxdp = ring->
rx_blocks[block_no].rxds[off].virt_addr;
2485 rxd_index = off + 1;
2487 rxd_index += (block_no * ring->
rxd_count);
2489 if ((block_no == block_no1) &&
2504 rxdp = ring->
rx_blocks[block_no].block_virt_addr;
2506 ring->
dev->name, rxdp);
2526 skb = netdev_alloc_skb(nic->
dev, size);
2542 rxdp1 = (
struct RxD1 *)rxdp;
2546 pci_map_single(ring->
pdev, skb->
data,
2549 if (pci_dma_mapping_error(nic->
pdev,
2551 goto pci_map_failed;
2563 rxdp3 = (
struct RxD3 *)rxdp;
2572 ba = &ring->
ba[block_no][off];
2573 skb_reserve(skb, BUF0_LEN);
2574 tmp = (
u64)(
unsigned long)skb->
data;
2577 skb->
data = (
void *) (
unsigned long)
tmp;
2578 skb_reset_tail_pointer(skb);
2582 pci_map_single(ring->
pdev, ba->
ba_0,
2585 if (pci_dma_mapping_error(nic->
pdev,
2587 goto pci_map_failed;
2589 pci_dma_sync_single_for_device(ring->
pdev,
2607 if (pci_dma_mapping_error(nic->
pdev,
2609 goto pci_map_failed;
2613 pci_map_single(ring->
pdev,
2618 if (pci_dma_mapping_error(nic->
pdev,
2620 pci_unmap_single(ring->
pdev,
2625 goto pci_map_failed;
2635 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2643 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2673 static void free_rxd_blk(
struct s2io_nic *sp,
int ring_no,
int blk)
2685 for (j = 0 ; j < rxd_count[sp->
rxd_mode]; j++) {
2686 rxdp = mac_control->
rings[ring_no].
2692 rxdp1 = (
struct RxD1 *)rxdp;
2693 pci_unmap_single(sp->
pdev,
2701 rxdp3 = (
struct RxD3 *)rxdp;
2702 pci_unmap_single(sp->
pdev,
2706 pci_unmap_single(sp->
pdev,
2710 pci_unmap_single(sp->
pdev,
2718 mac_control->
rings[ring_no].rx_bufs_left -= 1;
2731 static void free_rx_buffers(
struct s2io_nic *sp)
2734 int i, blk = 0, buf_cnt = 0;
2741 for (blk = 0; blk < rx_ring_sz[
i]; blk++)
2742 free_rxd_blk(sp, i, blk);
2750 dev->
name, buf_cnt, i);
2756 if (fill_rx_buffers(nic, ring, 0) == -
ENOMEM) {
2780 int pkts_processed = 0;
2783 struct s2io_nic *nic = netdev_priv(dev);
2785 int budget_org = budget;
2787 if (
unlikely(!is_s2io_card_up(nic)))
2790 pkts_processed = rx_intr_handler(ring, budget);
2791 s2io_chk_rx_buffers(nic, ring);
2793 if (pkts_processed < budget_org) {
2798 val8 = (ring->
ring_no == 0) ? 0x3f : 0xbf;
2802 return pkts_processed;
2805 static int s2io_poll_inta(
struct napi_struct *napi,
int budget)
2808 int pkts_processed = 0;
2809 int ring_pkts_processed,
i;
2811 int budget_org = budget;
2815 if (
unlikely(!is_s2io_card_up(nic)))
2820 ring_pkts_processed = rx_intr_handler(ring, budget);
2821 s2io_chk_rx_buffers(nic, ring);
2822 pkts_processed += ring_pkts_processed;
2823 budget -= ring_pkts_processed;
2827 if (pkts_processed < budget_org) {
2833 return pkts_processed;
2836 #ifdef CONFIG_NET_POLL_CONTROLLER
2846 static void s2io_netpoll(
struct net_device *dev)
2848 struct s2io_nic *nic = netdev_priv(dev);
2849 const int irq = nic->
pdev->irq;
2851 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2856 if (pci_channel_offline(nic->
pdev))
2869 tx_intr_handler(&mac_control->
fifos[i]);
2875 rx_intr_handler(ring, 0);
2881 if (fill_rx_buffers(nic, ring, 0) == -
ENOMEM) {
2883 "%s: Out of memory in Rx Netpoll!!\n",
2907 int get_block, put_block;
2911 int pkt_cnt = 0, napi_pkts = 0;
2917 get_block = get_info.block_index;
2920 rxdp = ring_data->
rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2922 while (RXD_IS_UP2DT(rxdp)) {
2927 if ((get_block == put_block) &&
2928 (get_info.offset + 1) ==
put_info.offset) {
2930 ring_data->
dev->name);
2936 ring_data->
dev->name);
2940 rxdp1 = (
struct RxD1 *)rxdp;
2949 rxdp3 = (
struct RxD3 *)rxdp;
2950 pci_dma_sync_single_for_cpu(ring_data->
pdev,
2954 pci_unmap_single(ring_data->
pdev,
2960 rx_osm_handler(ring_data, rxdp);
2964 rxds[get_info.offset].virt_addr;
2965 if (get_info.offset == rxd_count[ring_data->
rxd_mode]) {
2966 get_info.offset = 0;
2972 rxdp = ring_data->
rx_blocks[get_block].block_virt_addr;
2975 if (ring_data->
nic->config.napi) {
2982 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2985 if (ring_data->
lro) {
2990 update_L3L4_header(ring_data->
nic, lro);
2992 clear_lro_session(lro);
3011 static void tx_intr_handler(
struct fifo_info *fifo_data)
3018 unsigned long flags = 0;
3028 txdlp = fifo_data->
list_info[get_info.offset].list_virt_addr;
3030 (get_info.offset !=
put_info.offset) &&
3034 unsigned long long err;
3041 err_mask = err >> 48;
3065 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3067 spin_unlock_irqrestore(&fifo_data->
tx_lock, flags);
3079 if (get_info.offset == get_info.fifo_len + 1)
3080 get_info.offset = 0;
3081 txdlp = fifo_data->
list_info[get_info.offset].list_virt_addr;
3085 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->
config.multiq);
3087 spin_unlock_irqrestore(&fifo_data->
tx_lock, flags);
3100 static void s2io_mdio_write(
u32 mmd_type,
u64 addr,
u16 value,
3104 struct s2io_nic *sp = netdev_priv(dev);
3150 struct s2io_nic *sp = netdev_priv(dev);
3174 rval64 = rval64 & 0xFFFF0000;
3175 rval64 = rval64 >> 16;
3195 for (i = 0; i <
index; i++)
3199 *counter = *counter + 1;
3200 val64 = *regs_stat &
mask;
3201 val64 = val64 >> (index * 0x2);
3207 "Take Xframe NIC out of service.\n");
3209 "Excessive temperatures may result in premature transceiver failure.\n");
3213 "Take Xframe NIC out of service.\n");
3215 "Excessive bias currents may indicate imminent laser diode failure.\n");
3219 "Take Xframe NIC out of service.\n");
3221 "Excessive laser output power may saturate far-end receiver.\n");
3225 "Incorrect XPAK Alarm type\n");
3229 val64 = val64 << (index * 0x2);
3230 *regs_stat = (*regs_stat & (~mask)) | (val64);
3233 *regs_stat = *regs_stat & (~mask);
3244 static void s2io_updt_xpak_counter(
struct net_device *dev)
3252 struct s2io_nic *sp = netdev_priv(dev);
3260 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3262 "ERR: MDIO slave access failed - Returned %llx\n",
3263 (
unsigned long long)val64);
3270 "Returned: %llx- Expected: 0x%x\n",
3347 static int wait_for_cmd_complete(
void __iomem *addr,
u64 busy_bit,
3357 val64 =
readq(addr);
3359 if (!(val64 & busy_bit)) {
3364 if (val64 & busy_bit) {
3386 static u16 check_pci_device_id(
u16 id)
3410 static void s2io_reset(
struct s2io_nic *sp)
3417 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3418 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3423 __func__, pci_name(sp->
pdev));
3438 pci_read_config_word(sp->
pdev, 0x2, &val16);
3452 s2io_set_swapper(sp);
3455 do_s2io_restore_unicast_mc(sp);
3458 restore_xmsi_data(sp);
3466 pci_write_config_dword(sp->
pdev, 0x68, 0x7C);
3501 subid = sp->
pdev->subsystem_device;
3502 if (((subid & 0xFF) >= 0x07) &&
3505 val64 |= 0x0000800000000000ULL;
3507 val64 = 0x0411040400000000ULL;
3533 static int s2io_set_swapper(
struct s2io_nic *sp)
3537 u64 val64, valt, valr;
3545 if (val64 != 0x0123456789ABCDEFULL) {
3547 static const u64 value[] = {
3548 0xC30000C3C30000C3ULL,
3549 0x8100008181000081ULL,
3550 0x4200004242000042ULL,
3557 if (val64 == 0x0123456789ABCDEFULL)
3563 "feedback read %llx\n",
3564 dev->
name, (
unsigned long long)val64);
3572 valt = 0x0123456789ABCDEFULL;
3576 if (val64 != valt) {
3578 static const u64 value[] = {
3579 0x00C3C30000C3C300ULL,
3580 0x0081810000818100ULL,
3581 0x0042420000424200ULL,
3594 unsigned long long x = val64;
3596 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3601 val64 &= 0xFFFF000000000000ULL;
3654 if (val64 != 0x0123456789ABCDEFULL) {
3657 "%s: Endian settings are wrong, feedback read %llx\n",
3658 dev->
name, (
unsigned long long)val64);
3665 static int wait_for_msix_trans(
struct s2io_nic *nic,
int i)
3669 int ret = 0, cnt = 0;
3673 if (!(val64 &
s2BIT(15)))
3686 static void restore_xmsi_data(
struct s2io_nic *nic)
3696 msix_index = (
i) ? ((i-1) * 8 + 1) : 0;
3701 if (wait_for_msix_trans(nic, msix_index)) {
3703 __func__, msix_index);
3709 static void store_xmsi_data(
struct s2io_nic *nic)
3720 msix_index = (
i) ? ((i-1) * 8 + 1) : 0;
3721 val64 = (
s2BIT(15) |
vBIT(msix_index, 26, 6));
3723 if (wait_for_msix_trans(nic, msix_index)) {
3725 __func__, msix_index);
3737 static int s2io_enable_msi_x(
struct s2io_nic *nic)
3742 int ret,
i,
j, msix_indx = 1;
3747 size = nic->
num_entries *
sizeof(
struct msix_entry);
3765 += (nic->
num_entries *
sizeof(
struct msix_entry));
3777 nic->
entries[
i].entry = ((i - 1) * 8) + 1;
3784 for (j = 0; j < nic->
config.rx_ring_num; j++) {
3800 sizeof(
struct msix_entry);
3813 pci_read_config_word(nic->
pdev, 0x42, &msi_control);
3815 pci_write_config_word(nic->
pdev, 0x42, msi_control);
3832 static int s2io_test_msi(
struct s2io_nic *sp)
3843 sp->
dev->name, pci_name(pdev), pdev->
irq);
3861 "using MSI(X) during test\n",
3862 sp->
dev->name, pci_name(pdev));
3874 static void remove_msix_isr(
struct s2io_nic *sp)
3892 pci_read_config_word(sp->
pdev, 0x42, &msi_control);
3893 msi_control &= 0xFFFE;
3894 pci_write_config_word(sp->
pdev, 0x42, msi_control);
3899 static void remove_inta_isr(
struct s2io_nic *sp)
3922 struct s2io_nic *sp = netdev_priv(dev);
3934 err = s2io_card_up(sp);
3938 goto hw_init_failed;
3945 goto hw_init_failed;
3947 s2io_start_all_tx_queue(sp);
3955 sizeof(
struct msix_entry);
3979 static int s2io_close(
struct net_device *dev)
3981 struct s2io_nic *sp = netdev_priv(dev);
3989 if (!is_s2io_card_up(sp))
3992 s2io_stop_all_tx_queue(sp);
3994 for (offset = 1; offset < config->
max_mc_addr; offset++) {
3995 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3997 do_s2io_delete_unicast_mc(sp, tmp64);
4020 struct s2io_nic *sp = netdev_priv(dev);
4021 u16 frg_cnt, frg_len,
i,
queue, queue_len, put_off, get_off;
4025 unsigned long flags = 0;
4028 int do_spin_lock = 1;
4030 int enable_per_list_interrupt = 0;
4044 if (!is_s2io_card_up(sp)) {
4060 if (!ip_is_fragment(ip)) {
4061 th = (
struct tcphdr *)(((
unsigned char *)ip) +
4069 if (queue >= queue_len)
4070 queue = queue_len - 1;
4076 if (queue >= queue_len)
4077 queue = queue_len - 1;
4079 if (skb->
len > 1024)
4080 enable_per_list_interrupt = 1;
4089 fifo = &mac_control->
fifos[queue];
4099 if (__netif_subqueue_stopped(dev, fifo->
fifo_no)) {
4100 spin_unlock_irqrestore(&fifo->
tx_lock, flags);
4104 if (netif_queue_stopped(dev)) {
4105 spin_unlock_irqrestore(&fifo->
tx_lock, flags);
4112 txdp = fifo->
list_info[put_off].list_virt_addr;
4117 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4119 s2io_stop_tx_queue(sp, fifo->
fifo_no);
4121 spin_unlock_irqrestore(&fifo->
tx_lock, flags);
4138 if (enable_per_list_interrupt)
4139 if (put_off & (queue_len >> 5))
4146 frg_len = skb_headlen(skb);
4161 (
__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4169 goto pci_map_failed;
4176 goto pci_map_failed;
4183 frg_cnt = skb_shinfo(skb)->nr_frags;
4185 for (i = 0; i < frg_cnt; i++) {
4186 const skb_frag_t *frag = &skb_shinfo(skb)->frags[
i];
4188 if (!skb_frag_size(frag))
4193 skb_frag_size(frag),
4205 val64 = fifo->
list_info[put_off].list_phy_addr;
4223 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4226 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4228 s2io_stop_tx_queue(sp, fifo->
fifo_no);
4231 spin_unlock_irqrestore(&fifo->
tx_lock, flags);
4234 tx_intr_handler(fifo);
4240 s2io_stop_tx_queue(sp, fifo->
fifo_no);
4243 spin_unlock_irqrestore(&fifo->
tx_lock, flags);
4248 s2io_alarm_handle(
unsigned long data)
4253 s2io_handle_errors(dev);
4263 if (
unlikely(!is_s2io_card_up(sp)))
4272 val8 = (ring->
ring_no == 0) ? 0x7f : 0xff;
4275 napi_schedule(&ring->
napi);
4277 rx_intr_handler(ring, 0);
4278 s2io_chk_rx_buffers(sp, ring);
4284 static irqreturn_t s2io_msix_fifo_handle(
int irq,
void *dev_id)
4293 if (
unlikely(!is_s2io_card_up(sp)))
4305 s2io_txpic_intr_handle(sp);
4311 tx_intr_handler(&fifos[i]);
4321 static void s2io_txpic_intr_handle(
struct s2io_nic *sp)
4342 }
else if (val64 & GPIO_INT_REG_LINK_UP) {
4363 }
else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4391 static int do_s2io_chk_alarm_bit(
u64 value,
void __iomem *addr,
4392 unsigned long long *cnt)
4395 val64 =
readq(addr);
4396 if (val64 & value) {
4413 static void s2io_handle_errors(
void *dev_id)
4416 struct s2io_nic *sp = netdev_priv(dev);
4418 u64 temp64 = 0, val64 = 0;
4424 if (!is_s2io_card_up(sp))
4427 if (pci_channel_offline(sp->
pdev))
4438 s2io_updt_xpak_counter(dev);
4464 for (i = 0; i < 4; i++) {
4465 temp64 = (val64 &
vBIT(0xFFFF, (i*16), 16));
4466 temp64 >>= 64 - ((i+1)*16);
4471 for (i = 0; i < 4; i++) {
4472 temp64 = (val64 &
vBIT(0xFFFF, (i*16), 16));
4473 temp64 >>= 64 - ((i+1)*16);
4697 s2io_stop_all_tx_queue(sp);
4715 static irqreturn_t s2io_isr(
int irq,
void *dev_id)
4718 struct s2io_nic *sp = netdev_priv(dev);
4726 if (pci_channel_offline(sp->
pdev))
4729 if (!is_s2io_card_up(sp))
4753 napi_schedule(&sp->
napi);
4770 rx_intr_handler(ring, 0);
4779 if (reason & GEN_INTR_TXTRAFFIC)
4783 tx_intr_handler(&mac_control->
fifos[i]);
4785 if (reason & GEN_INTR_TXPIC)
4786 s2io_txpic_intr_handle(sp);
4791 if (!config->
napi) {
4795 s2io_chk_rx_buffers(sp, ring);
4803 }
else if (!reason) {
4814 static void s2io_updt_stats(
struct s2io_nic *sp)
4820 if (is_s2io_card_up(sp)) {
4828 if (!(val64 &
s2BIT(0)))
4848 struct s2io_nic *sp = netdev_priv(dev);
4854 s2io_updt_stats(sp);
4865 sp->
stats.rx_packets += delta;
4866 dev->
stats.rx_packets += delta;
4870 sp->
stats.tx_packets += delta;
4871 dev->
stats.tx_packets += delta;
4875 sp->
stats.rx_bytes += delta;
4876 dev->
stats.rx_bytes += delta;
4880 sp->
stats.tx_bytes += delta;
4881 dev->
stats.tx_bytes += delta;
4889 sp->
stats.tx_errors += delta;
4890 dev->
stats.tx_errors += delta;
4908 delta -= sp->
stats.multicast;
4938 static void s2io_set_multicast(
struct net_device *dev)
4942 struct s2io_nic *sp = netdev_priv(dev);
4944 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4997 if (vlan_tag_strip != 1) {
5019 if (vlan_tag_strip != 0) {
5036 "%s: No more Rx filters can be added - "
5037 "please enable ALL_MULTI instead\n",
5046 for (i = 0; i < prev_cnt; i++) {
5062 "%s: Adding Multicasts failed\n",
5073 mac_addr |= ha->
addr[
j];
5092 "%s: Adding Multicasts failed\n",
5104 static void do_s2io_store_unicast_mc(
struct s2io_nic *sp)
5111 for (offset = 0; offset < config->
max_mc_addr; offset++) {
5112 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5116 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5121 static void do_s2io_restore_unicast_mc(
struct s2io_nic *sp)
5126 for (offset = 0; offset < config->
max_mac_addr; offset++)
5127 do_s2io_prog_unicast(sp->
dev,
5137 static int do_s2io_add_mc(
struct s2io_nic *sp,
u8 *addr)
5145 mac_addr |= addr[
i];
5153 tmp64 = do_s2io_read_unicast_mc(sp, i);
5157 if (tmp64 == mac_addr)
5162 "CAM full no space left for multicast MAC\n");
5166 do_s2io_copy_mac_addr(sp, i, mac_addr);
5168 return do_s2io_add_mac(sp, mac_addr, i);
5172 static int do_s2io_add_mac(
struct s2io_nic *sp,
u64 addr,
int off)
5194 static int do_s2io_delete_unicast_mc(
struct s2io_nic *sp,
u64 addr)
5202 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5203 if (tmp64 == addr) {
5205 if (do_s2io_add_mac(sp, dis_addr, offset) ==
FAILURE)
5208 do_s2io_store_unicast_mc(sp);
5213 (
unsigned long long)addr);
5218 static u64 do_s2io_read_unicast_mc(
struct s2io_nic *sp,
int offset)
5220 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5244 static int s2io_set_mac_addr(
struct net_device *dev,
void *
p)
5248 if (!is_valid_ether_addr(addr->
sa_data))
5254 return do_s2io_prog_unicast(dev, dev->
dev_addr);
5266 static int do_s2io_prog_unicast(
struct net_device *dev,
u8 *addr)
5268 struct s2io_nic *sp = netdev_priv(dev);
5269 register u64 mac_addr = 0, perm_addr = 0;
5281 mac_addr |= addr[
i];
5287 if (mac_addr == perm_addr)
5292 tmp64 = do_s2io_read_unicast_mc(sp, i);
5296 if (tmp64 == mac_addr) {
5298 "MAC addr:0x%llx already present in CAM\n",
5299 (
unsigned long long)mac_addr);
5308 do_s2io_copy_mac_addr(sp, i, mac_addr);
5310 return do_s2io_add_mac(sp, mac_addr, i);
5325 static int s2io_ethtool_sset(
struct net_device *dev,
5328 struct s2io_nic *sp = netdev_priv(dev);
5334 s2io_close(sp->
dev);
5355 struct s2io_nic *sp = netdev_priv(dev);
5363 if (netif_carrier_ok(sp->
dev)) {
5367 ethtool_cmd_speed_set(info, -1);
5387 static void s2io_ethtool_gdrvinfo(
struct net_device *dev,
5390 struct s2io_nic *sp = netdev_priv(dev);
5413 static void s2io_ethtool_gregs(
struct net_device *dev,
5418 u8 *reg_space = (
u8 *)space;
5419 struct s2io_nic *sp = netdev_priv(dev);
5424 for (i = 0; i < regs->
len; i += 8) {
5426 memcpy((reg_space + i), ®, 8);
5433 static void s2io_set_led(
struct s2io_nic *sp,
bool on)
5436 u16 subid = sp->
pdev->subsystem_device;
5440 ((subid & 0xFF) >= 0x07)) {
5472 static int s2io_ethtool_set_led(
struct net_device *dev,
5475 struct s2io_nic *sp = netdev_priv(dev);
5477 u16 subid = sp->
pdev->subsystem_device;
5482 pr_err(
"Adapter Link down, cannot blink LED\n");
5493 s2io_set_led(sp,
true);
5497 s2io_set_led(sp,
false);
5508 static void s2io_ethtool_gringparam(
struct net_device *dev,
5511 struct s2io_nic *sp = netdev_priv(dev);
5512 int i, tx_desc_count = 0, rx_desc_count = 0;
5524 for (i = 0; i < sp->
config.rx_ring_num; i++)
5525 rx_desc_count += sp->
config.rx_cfg[i].num_rxd;
5529 for (i = 0; i < sp->
config.tx_fifo_num; i++)
5530 tx_desc_count += sp->
config.tx_cfg[i].fifo_len;
5545 static void s2io_ethtool_getpause_data(
struct net_device *dev,
5549 struct s2io_nic *sp = netdev_priv(dev);
5572 static int s2io_ethtool_setpause_data(
struct net_device *dev,
5576 struct s2io_nic *sp = netdev_priv(dev);
5583 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5608 #define S2IO_DEV_ID 5
5624 while (exit_cnt < 5) {
5643 while (exit_cnt < 5) {
5676 static int write_eeprom(
struct s2io_nic *sp,
int off,
u64 data,
int cnt)
5678 int exit_cnt = 0, ret = -1;
5690 while (exit_cnt < 5) {
5703 int write_cnt = (cnt == 8) ? 0 : cnt;
5712 while (exit_cnt < 5) {
5727 static void s2io_vpd_read(
struct s2io_nic *nic)
5731 int i = 0,
cnt,
len, fail = 0;
5732 int vpd_addr = 0x80;
5751 for (i = 0; i < 256; i += 4) {
5752 pci_write_config_byte(nic->
pdev, (vpd_addr + 2), i);
5753 pci_read_config_byte(nic->
pdev, (vpd_addr + 2), &data);
5754 pci_write_config_byte(nic->
pdev, (vpd_addr + 3), 0);
5755 for (cnt = 0; cnt < 5; cnt++) {
5757 pci_read_config_byte(nic->
pdev, (vpd_addr + 3), &data);
5766 pci_read_config_dword(nic->
pdev, (vpd_addr + 4),
5767 (
u32 *)&vpd_data[i]);
5772 for (cnt = 0; cnt < 252; cnt++) {
5773 if ((vpd_data[cnt] ==
'S') &&
5774 (vpd_data[cnt+1] ==
'N')) {
5775 len = vpd_data[cnt+2];
5811 static int s2io_ethtool_geeprom(
struct net_device *dev,
5816 struct s2io_nic *sp = netdev_priv(dev);
5818 eeprom->
magic = sp->
pdev->vendor | (sp->
pdev->device << 16);
5823 for (i = 0; i < eeprom->
len; i += 4) {
5829 memcpy((data_buf + i), &valid, 4);
5848 static int s2io_ethtool_seeprom(
struct net_device *dev,
5852 int len = eeprom->
len, cnt = 0;
5854 struct s2io_nic *sp = netdev_priv(dev);
5856 if (eeprom->
magic != (sp->
pdev->vendor | (sp->
pdev->device << 16))) {
5858 "ETHTOOL_WRITE_EEPROM Err: "
5859 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5860 (sp->
pdev->vendor | (sp->
pdev->device << 16)),
5866 data = (
u32)data_buf[cnt] & 0x000000FF;
5868 valid = (
u32)(data << 24);
5872 if (write_eeprom(sp, (eeprom->
offset + cnt),
valid, 0)) {
5874 "ETHTOOL_WRITE_EEPROM Err: "
5875 "Cannot write into the specified offset\n");
5901 u64 val64 = 0, exp_val;
5905 if (val64 != 0x123456789abcdefULL) {
5911 if (val64 != 0xc000ffff00000000ULL) {
5918 exp_val = 0x0404040404040404ULL;
5920 exp_val = 0x0808080808080808ULL;
5921 if (val64 != exp_val) {
5927 if (val64 != 0x000000001923141EULL) {
5932 val64 = 0x5A5A5A5A5A5A5A5AULL;
5935 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5940 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5943 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5968 u64 ret_data, org_4F0, org_7F0;
5969 u8 saved_4F0 = 0, saved_7F0 = 0;
5977 if (!write_eeprom(sp, 0, 0, 3))
5987 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5992 if (ret_data != 0x012345) {
5994 "Data written %llx Data read %llx\n",
5995 dev->
name, (
unsigned long long)0x12345,
5996 (
unsigned long long)ret_data);
6001 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6005 if (!write_eeprom(sp, 0x07C, 0, 3))
6009 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6014 if (ret_data != 0x012345) {
6016 "Data written %llx Data read %llx\n",
6017 dev->
name, (
unsigned long long)0x12345,
6018 (
unsigned long long)ret_data);
6023 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6027 if (!write_eeprom(sp, 0x080, 0, 3))
6031 if (!write_eeprom(sp, 0x0FC, 0, 3))
6035 if (!write_eeprom(sp, 0x100, 0, 3))
6039 if (!write_eeprom(sp, 0x4EC, 0, 3))
6045 write_eeprom(sp, 0x4F0, org_4F0, 3);
6047 write_eeprom(sp, 0x7F0, org_7F0, 3);
6070 int cnt = 0, ret = -1;
6151 while (iteration < 2) {
6152 val64 = 0x55555555aaaa0000ULL;
6154 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6157 val64 = 0xaaaa5a5555550000ULL;
6159 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6162 val64 = 0x55aaaaaaaa5a0000ULL;
6164 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6167 val64 = (
u64) (0x0000003ffffe0100ULL);
6175 for (cnt = 0; cnt < 5; cnt++) {
6188 for (cnt = 0; cnt < 5; cnt++) {
6228 static void s2io_ethtool_test(
struct net_device *dev,
6232 struct s2io_nic *sp = netdev_priv(dev);
6233 int orig_state = netif_running(sp->
dev);
6238 s2io_close(sp->
dev);
6240 if (s2io_register_test(sp, &data[0]))
6245 if (s2io_rldram_test(sp, &data[3]))
6250 if (s2io_eeprom_test(sp, &data[1]))
6253 if (s2io_bist_test(sp, &data[4]))
6272 if (s2io_link_test(sp, &data[2]))
6282 static void s2io_get_ethtool_stats(
struct net_device *dev,
6287 struct s2io_nic *sp = netdev_priv(dev);
6292 s2io_updt_stats(sp);
6512 tmp_stats[i++] =
count;
6559 static int s2io_ethtool_get_regs_len(
struct net_device *dev)
6565 static int s2io_get_eeprom_len(
struct net_device *dev)
6570 static int s2io_get_sset_count(
struct net_device *dev,
int sset)
6572 struct s2io_nic *sp = netdev_priv(dev);
6591 static void s2io_ethtool_get_strings(
struct net_device *dev,
6592 u32 stringset,
u8 *data)
6595 struct s2io_nic *sp = netdev_priv(dev);
6597 switch (stringset) {
6602 stat_size =
sizeof(ethtool_xena_stats_keys);
6603 memcpy(data, ðtool_xena_stats_keys, stat_size);
6606 ðtool_enhanced_stats_keys,
6607 sizeof(ethtool_enhanced_stats_keys));
6608 stat_size +=
sizeof(ethtool_enhanced_stats_keys);
6611 memcpy(data + stat_size, ðtool_driver_stats_keys,
6612 sizeof(ethtool_driver_stats_keys));
6618 struct s2io_nic *sp = netdev_priv(dev);
6621 if (changed && netif_running(dev)) {
6624 s2io_stop_all_tx_queue(sp);
6627 rc = s2io_card_up(sp);
6631 s2io_start_all_tx_queue(sp);
6639 static const struct ethtool_ops netdev_ethtool_ops = {
6640 .get_settings = s2io_ethtool_gset,
6641 .set_settings = s2io_ethtool_sset,
6642 .get_drvinfo = s2io_ethtool_gdrvinfo,
6643 .get_regs_len = s2io_ethtool_get_regs_len,
6644 .get_regs = s2io_ethtool_gregs,
6646 .get_eeprom_len = s2io_get_eeprom_len,
6647 .get_eeprom = s2io_ethtool_geeprom,
6648 .set_eeprom = s2io_ethtool_seeprom,
6649 .get_ringparam = s2io_ethtool_gringparam,
6650 .get_pauseparam = s2io_ethtool_getpause_data,
6651 .set_pauseparam = s2io_ethtool_setpause_data,
6652 .self_test = s2io_ethtool_test,
6653 .get_strings = s2io_ethtool_get_strings,
6654 .set_phys_id = s2io_ethtool_set_led,
6655 .get_ethtool_stats = s2io_get_ethtool_stats,
6656 .get_sset_count = s2io_get_sset_count,
6687 static int s2io_change_mtu(
struct net_device *dev,
int new_mtu)
6689 struct s2io_nic *sp = netdev_priv(dev);
6698 if (netif_running(dev)) {
6699 s2io_stop_all_tx_queue(sp);
6701 ret = s2io_card_up(sp);
6707 s2io_wake_all_tx_queue(sp);
6710 u64 val64 = new_mtu;
6735 if (!netif_running(dev))
6743 subid = nic->
pdev->subsystem_device;
6755 if (verify_xena_quiescence(nic)) {
6772 "%s: Error: device is not Quiescent\n",
6774 s2io_stop_all_tx_queue(nic);
6801 static int set_rxd_buffer_pointer(
struct s2io_nic *sp,
struct RxD_t *rxdp,
6810 struct RxD1 *rxdp1 = (
struct RxD1 *)rxdp;
6821 *skb = netdev_alloc_skb(dev, size);
6824 "%s: Out of memory to allocate %s\n",
6825 dev->
name,
"1 buf mode SKBs");
6835 pci_map_single(sp->
pdev, (*skb)->data,
6839 goto memalloc_failed;
6843 struct RxD3 *rxdp3 = (
struct RxD3 *)rxdp;
6850 *skb = netdev_alloc_skb(dev, size);
6853 "%s: Out of memory to allocate %s\n",
6861 pci_map_single(sp->
pdev, (*skb)->data,
6865 goto memalloc_failed;
6867 pci_map_single(sp->
pdev, ba->
ba_0, BUF0_LEN,
6869 if (pci_dma_mapping_error(sp->
pdev,
6871 pci_unmap_single(sp->
pdev,
6875 goto memalloc_failed;
6883 if (pci_dma_mapping_error(sp->
pdev,
6885 pci_unmap_single(sp->
pdev,
6888 pci_unmap_single(sp->
pdev,
6892 goto memalloc_failed;
6901 dev_kfree_skb(*skb);
6905 static void set_rxd_buffer_size(
struct s2io_nic *sp,
struct RxD_t *rxdp,
6918 static int rxd_owner_bit_reset(
struct s2io_nic *sp)
6920 int i,
j,
k, blk_cnt = 0,
size;
6927 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6943 for (j = 0; j <
blk_cnt; j++) {
6944 for (k = 0; k < rxd_count[sp->
rxd_mode]; k++) {
6947 ba = &ring->
ba[
j][
k];
6948 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6956 set_rxd_buffer_size(sp, rxdp, size);
6967 static int s2io_add_isr(
struct s2io_nic *sp)
6974 ret = s2io_enable_msi_x(sp);
6984 store_xmsi_data(sp);
6988 int i, msix_rx_cnt = 0;
6997 s2io_msix_ring_handle,
7006 s2io_msix_fifo_handle,
7016 "%s @Addr:0x%llx Data:0x%llx\n",
7018 (
unsigned long long)
7020 (
unsigned long long)
7025 remove_msix_isr(sp);
7028 "%s:MSI-X-%d registration "
7029 "failed\n", dev->
name, i);
7032 "%s: Defaulting to INTA\n",
7042 pr_info(
"MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7044 "MSI-X-TX entries enabled through alarm vector\n");
7059 static void s2io_rem_isr(
struct s2io_nic *sp)
7062 remove_msix_isr(sp);
7064 remove_inta_isr(sp);
7067 static void do_s2io_card_down(
struct s2io_nic *sp,
int do_io)
7071 register u64 val64 = 0;
7075 if (!is_s2io_card_up(sp))
7088 for (; off < sp->
config.rx_ring_num; off++)
7092 napi_disable(&sp->
napi);
7113 rxd_owner_bit_reset(sp);
7116 if (verify_xena_quiescence(sp)) {
7125 "adapter status reads 0x%llx\n",
7126 (
unsigned long long)val64);
7134 free_tx_buffers(sp);
7137 free_rx_buffers(sp);
7142 static void s2io_card_down(
struct s2io_nic *sp)
7144 do_s2io_card_down(sp, 1);
7147 static int s2io_card_up(
struct s2io_nic *sp)
7177 ret = fill_rx_buffers(sp, ring, 1);
7182 free_rx_buffers(sp);
7192 for (i = 0; i < sp->
config.rx_ring_num; i++)
7195 napi_enable(&sp->
napi);
7208 s2io_set_multicast(dev);
7214 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7219 if (start_nic(sp)) {
7222 free_rx_buffers(sp);
7227 if (s2io_add_isr(sp) != 0) {
7231 free_rx_buffers(sp);
7243 en_dis_able_nic_intrs(sp, interruptible,
ENABLE_INTRS);
7247 en_dis_able_nic_intrs(sp, interruptible,
ENABLE_INTRS);
7263 static void s2io_restart_nic(
struct work_struct *work)
7270 if (!netif_running(dev))
7274 if (s2io_card_up(sp)) {
7277 s2io_wake_all_tx_queue(sp);
7296 static void s2io_tx_watchdog(
struct net_device *dev)
7298 struct s2io_nic *sp = netdev_priv(dev);
7301 if (netif_carrier_ok(dev)) {
7325 static int rx_osm_handler(
struct ring_info *ring_data,
struct RxD_t * rxdp)
7331 int ring_no = ring_data->
ring_no;
7332 u16 l3_csum, l4_csum;
7345 err_mask = err >> 48;
7390 if (err_mask != 0x5) {
7392 dev->
name, err_mask);
7393 dev->
stats.rx_crc_errors++;
7413 unsigned char *buff =
skb_push(skb, buf0_len);
7415 struct buffAdd *ba = &ring_data->
ba[get_block][get_off];
7421 ((!ring_data->
lro) ||
7433 if (ring_data->
lro) {
7438 ret = s2io_club_tcp_session(ring_data,
7447 lro_append_pkt(sp,
lro, skb, tcp_len);
7450 lro_append_pkt(sp,
lro, skb, tcp_len);
7453 clear_lro_session(
lro);
7461 clear_lro_session(
lro);
7472 "%s: Samadhana!!\n",
7482 skb_checksum_none_assert(skb);
7485 skb_checksum_none_assert(skb);
7489 skb_record_rx_queue(skb, ring_no);
7509 static void s2io_link(
struct s2io_nic *sp,
int link)
7518 s2io_stop_all_tx_queue(sp);
7531 s2io_wake_all_tx_queue(sp);
7549 static void s2io_init_pci(
struct s2io_nic *sp)
7551 u16 pci_cmd = 0, pcix_cmd = 0;
7568 static int s2io_verify_parm(
struct pci_dev *pdev,
u8 *dev_intr_type,
7573 if ((tx_fifo_num >
MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7575 "(%d) not supported\n", tx_fifo_num);
7577 if (tx_fifo_num < 1)
7586 *dev_multiq = multiq;
7588 if (tx_steering_type && (1 == tx_fifo_num)) {
7591 "Tx steering is not supported with "
7592 "one fifo. Disabling Tx steering.\n");
7599 "Requested transmit steering not supported\n");
7604 if (rx_ring_num > MAX_RX_RINGS) {
7606 "Requested number of rx rings not supported\n");
7612 if ((*dev_intr_type !=
INTA) && (*dev_intr_type !=
MSI_X)) {
7614 "Defaulting to INTA\n");
7615 *dev_intr_type =
INTA;
7618 if ((*dev_intr_type ==
MSI_X) &&
7622 "Defaulting to INTA\n");
7623 *dev_intr_type =
INTA;
7626 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7635 "supported\nDefaulting to %d\n",
7652 static int rts_ds_steer(
struct s2io_nic *nic,
u8 ds_codepoint,
u8 ring)
7655 register u64 val64 = 0;
7657 if (ds_codepoint > 63)
7675 .ndo_open = s2io_open,
7676 .ndo_stop = s2io_close,
7677 .ndo_get_stats = s2io_get_stats,
7678 .ndo_start_xmit = s2io_xmit,
7680 .ndo_set_rx_mode = s2io_set_multicast,
7681 .ndo_do_ioctl = s2io_ioctl,
7682 .ndo_set_mac_address = s2io_set_mac_addr,
7683 .ndo_change_mtu = s2io_change_mtu,
7684 .ndo_set_features = s2io_set_features,
7685 .ndo_tx_timeout = s2io_tx_watchdog,
7686 #ifdef CONFIG_NET_POLL_CONTROLLER
7687 .ndo_poll_controller = s2io_netpoll,
7711 int dma_flag =
false;
7712 u32 mac_up, mac_down;
7713 u64 val64 = 0, tmp64 = 0;
7719 u8 dev_intr_type = intr_type;
7722 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7729 "%s: pci_enable_device failed\n", __func__);
7736 if (pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(64))) {
7738 "Unable to obtain 64bit DMA "
7739 "for consistent allocations\n");
7743 }
else if (!pci_set_dma_mask(pdev,
DMA_BIT_MASK(32))) {
7757 dev = alloc_etherdev_mq(
sizeof(
struct s2io_nic), tx_fifo_num);
7759 dev = alloc_etherdev(
sizeof(
struct s2io_nic));
7767 pci_set_drvdata(pdev, dev);
7771 sp = netdev_priv(dev);
7776 if (rx_ring_mode == 1)
7778 if (rx_ring_mode == 2)
7781 sp->
config.intr_type = dev_intr_type;
7829 config->
multiq = dev_multiq;
7874 for (i = 0; i < rx_ring_num; i++) {
7888 if (init_shared_mem(sp)) {
7891 goto mem_alloc_failed;
7899 goto bar0_remap_failed;
7907 goto bar1_remap_failed;
7937 if (s2io_set_swapper(sp)) {
7941 goto set_swap_failed;
7946 mode = s2io_verify_pci_mode(sp);
7951 goto set_swap_failed;
7957 ret = s2io_enable_msi_x(sp);
7960 ret = s2io_test_msi(sp);
7962 remove_msix_isr(sp);
7967 "MSI-X requested but failed to enable\n");
7988 fix_mac_address(sp);
8004 mac_down = (
u32)tmp64;
8005 mac_up = (
u32) (tmp64 >> 32);
8031 do_s2io_store_unicast_mc(sp);
8039 store_xmsi_data(sp);
8050 for (i = 0; i < sp->
config.tx_fifo_num; i++) {
8060 subid = sp->
pdev->subsystem_device;
8061 if ((subid & 0xFF) >= 0x07) {
8063 val64 |= 0x0000800000000000ULL;
8065 val64 = 0x0411040400000000ULL;
8075 goto register_failed;
8082 s2io_driver_version);
8086 mode = s2io_print_pci_mode(sp);
8090 goto set_swap_failed;
8104 switch (sp->
config.napi) {
8119 switch (sp->
config.intr_type) {
8128 for (i = 0; i < sp->
config.tx_fifo_num; i++) {
8139 switch (sp->
config.tx_steering_type) {
8146 "%s: Priority steering enabled for transmit\n",
8151 "%s: Default steering enabled for transmit\n",
8159 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8185 free_shared_mem(sp);
8188 pci_set_drvdata(pdev,
NULL);
8205 struct net_device *dev = pci_get_drvdata(pdev);
8213 sp = netdev_priv(dev);
8220 free_shared_mem(sp);
8224 pci_set_drvdata(pdev,
NULL);
8235 static int __init s2io_starter(
void)
8237 return pci_register_driver(&s2io_driver);
8245 static __exit void s2io_closer(
void)
8254 static int check_L2_lro_capable(
u8 *
buffer,
struct iphdr **ip,
8263 "%s: Non-TCP frames not supported for LRO\n",
8269 if ((l2_type == 0) || (l2_type == 4)) {
8283 *ip = (
struct iphdr *)(buffer + ip_off);
8284 ip_len = (
u8)((*ip)->ihl);
8286 *tcp = (
struct tcphdr *)((
unsigned long)*ip + ip_len);
8291 static int check_for_socket_match(
struct lro *
lro,
struct iphdr *ip,
8295 if ((lro->
iph->saddr != ip->
saddr) ||
8303 static inline int get_l4_pyld_length(
struct iphdr *ip,
struct tcphdr *tcp)
8305 return ntohs(ip->
tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8308 static void initiate_new_session(
struct lro *lro,
u8 *l2h,
8310 u32 tcp_pyld_len,
u16 vlan_tag)
8326 if (tcp->doff == 8) {
8336 static void update_L3L4_header(
struct s2io_nic *sp,
struct lro *lro)
8368 static void aggregate_new_rx(
struct lro *lro,
struct iphdr *ip,
8390 static int verify_l3_l4_lro_capable(
struct lro *l_lro,
struct iphdr *ip,
8397 if (!tcp_pyld_len) {
8406 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8410 if (tcp->urg || tcp->psh || tcp->rst ||
8411 tcp->syn || tcp->fin ||
8412 tcp->ece || tcp->cwr || !tcp->ack) {
8425 if (tcp->doff != 5 && tcp->doff != 8)
8428 if (tcp->doff == 8) {
8429 ptr = (
u8 *)(tcp + 1);
8441 if (*((
__be32 *)(ptr+6)) == 0)
8448 static int s2io_club_tcp_session(
struct ring_info *ring_data,
u8 *buffer,
8449 u8 **tcp,
u32 *tcp_len,
struct lro **lro,
8458 ret = check_L2_lro_capable(buffer, &ip, (
struct tcphdr **)tcp,
8466 tcph = (
struct tcphdr *)*tcp;
8467 *tcp_len = get_l4_pyld_length(ip, tcph);
8469 struct lro *l_lro = &ring_data->
lro0_n[
i];
8471 if (check_for_socket_match(l_lro, ip, tcph))
8476 if ((*lro)->tcp_next_seq !=
ntohl(tcph->
seq)) {
8478 "expected 0x%x, actual 0x%x\n",
8480 (*lro)->tcp_next_seq,
8488 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8503 if (verify_l3_l4_lro_capable(
NULL, ip, tcph, *tcp_len))
8507 struct lro *l_lro = &ring_data->
lro0_n[
i];
8525 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8529 update_L3L4_header(sp, *lro);
8532 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8534 update_L3L4_header(sp, *lro);
8546 static void clear_lro_session(
struct lro *lro)
8548 static u16 lro_struct_size =
sizeof(
struct lro);
8550 memset(lro, 0, lro_struct_size);
8553 static void queue_rx_frame(
struct sk_buff *skb,
u16 vlan_tag)
8556 struct s2io_nic *sp = netdev_priv(dev);
8560 __vlan_hwaccel_put_tag(skb, vlan_tag);
8567 static void lro_append_pkt(
struct s2io_nic *sp,
struct lro *lro,
8573 first->
len += tcp_len;
8576 if (skb_shinfo(first)->frag_list)
8579 skb_shinfo(first)->frag_list =
skb;
8596 struct net_device *netdev = pci_get_drvdata(pdev);
8597 struct s2io_nic *sp = netdev_priv(netdev);
8604 if (netif_running(netdev)) {
8606 do_s2io_card_down(sp, 0);
8624 struct net_device *netdev = pci_get_drvdata(pdev);
8625 struct s2io_nic *sp = netdev_priv(netdev);
8628 pr_err(
"Cannot re-enable PCI device after reset.\n");
8645 static void s2io_io_resume(
struct pci_dev *pdev)
8647 struct net_device *netdev = pci_get_drvdata(pdev);
8648 struct s2io_nic *sp = netdev_priv(netdev);
8650 if (netif_running(netdev)) {
8651 if (s2io_card_up(sp)) {
8652 pr_err(
"Can't bring device back up after reset.\n");
8658 pr_err(
"Can't restore mac addr after reset.\n");
8664 netif_tx_wake_all_queues(netdev);