#include <linux/pci.h>
#include <linux/slab.h>
#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {				\
	status = __vxge_hw_vpath_stats_access(vpath,			\
					      VXGE_HW_STATS_OP_READ,	\
					      offset,			\
					      &val64);			\
	if (status != VXGE_HW_OK)					\
		return status;						\
}
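/*
 * Illustrative sketch (an assumption, not part of this file): the macro
 * above expects 'vpath', 'status' and 'val64' to already be in scope in
 * the expanding function, and returns from that function on a failed
 * read.  A caller reading one 64-bit counter would look roughly like:
 *
 *	enum vxge_hw_status status = VXGE_HW_OK;
 *	u64 val64 = 0;
 *
 *	VXGE_HW_VPATH_STATS_PIO_READ(example_offset);
 *	stats->example_counter = val64;
 *
 * 'example_offset' and 'example_counter' are hypothetical names.
 */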
	val64 = readq(&vp_reg->rxmac_vcfg0);
	writeq(val64, &vp_reg->rxmac_vcfg0);
	val64 = readq(&vp_reg->rxmac_vcfg0);

	u64 val64, rxd_count, rxd_spat;
	int count = 0, total_count = 0;

	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

	if ((rxd_count <= rxd_spat) || (val64 > 0))

	int i, total_count = 0;

	} while (++i <= max_millis);

	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);

	return __vxge_hw_device_register_poll(addr, mask, max_millis);
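/*
 * Sketch of the intent here (assuming the usual Titan PIO convention seen
 * in this file): a 64-bit register value is written as two 32-bit PIO
 * accesses - bits 32..63 through the lower accessor, then bits 0..31
 * through the upper one - after which the caller polls the register until
 * the masked bits clear.  A minimal poll loop under those assumptions:
 *
 *	u32 i = 0;
 *	do {
 *		if (!(readq(addr) & mask))
 *			return VXGE_HW_OK;
 *		mdelay(1);
 *	} while (++i <= max_millis);
 *	return VXGE_HW_FAIL;
 */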
	spin_lock(&vpath->lock);
	spin_unlock(&vpath->lock);

	status = __vxge_hw_pio_mem_write64(val64,

	while ((status != VXGE_HW_OK) && retry++ < max_retry) {

		status = __vxge_hw_device_register_poll(

	spin_unlock(&vpath->lock);

	u64 data0 = 0, data1 = 0, steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      &data0, &data1, &steer_ctrl);
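/*
 * Firmware API convention inferred from the calls in this file (hedged;
 * the exact semantics live in vxge_hw_vpath_fw_api itself): an action
 * code, an RTS table selector and an offset go in, while data0, data1
 * and steer_ctrl are 64-bit in/out words that carry both the request and
 * the reply.  A typical invocation therefore zeroes the three words,
 * issues the call, and decodes data0/data1 only on success:
 *
 *	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 *
 *	status = vxge_hw_vpath_fw_api(vpath, action, rts_table, offset,
 *				      &data0, &data1, &steer_ctrl);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	(decode data0/data1 here)
 */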
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      &data0, &data1, &steer_ctrl);

	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
	int ret_code, sec_code;

	status = vxge_hw_vpath_fw_api(vpath,
				      &data0, &data1, &steer_ctrl);

	data0 = *((u64 *)fwdata);
	data1 = *((u64 *)fwdata + 1);

	status = vxge_hw_vpath_fw_api(vpath,
				      &data0, &data1, &steer_ctrl);

	fwdata += (data0 >> 8) & 0xFFFFFFFF;

		"corrupted data from .ncf file\n");
		"generic error. Unknown error type\n");

	u64 data0 = 0, data1 = 0, steer_ctrl = 0;

	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);
	vpath = channel->vph->vpath;

	for (i = 0; i < channel->length; i++)

	switch (channel->type) {
		channel->stats = &((struct __vxge_hw_fifo *)
				channel)->stats->common_stats;
		channel->stats = &((struct __vxge_hw_ring *)
				channel)->stats->common_stats;

	for (i = 0; i < channel->length; i++) {

	channel->post_index = 0;
	channel->compl_index = 0;

__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
	val64 = readq(&legacy_reg->toc_swapper_fb);

		&legacy_reg->pifm_rd_swap_en);
		&legacy_reg->pifm_rd_flip_en);
		&legacy_reg->pifm_wr_swap_en);
		&legacy_reg->pifm_wr_flip_en);
		&legacy_reg->pifm_rd_swap_en);
		&legacy_reg->pifm_wr_swap_en);
		&legacy_reg->pifm_rd_flip_en);
		&legacy_reg->pifm_wr_flip_en);

	val64 = readq(&legacy_reg->toc_swapper_fb);

__vxge_hw_device_toc_get(void __iomem *bar0)

	status = __vxge_hw_legacy_swapper_set(legacy_reg);

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);

	status = __vxge_hw_device_vpath_reset_in_prog_check(

	return access_rights;

	if (__vxge_hw_device_access_rights_get(host_type,

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

		__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

	status = __vxge_hw_verify_pci_e_info(hldev);
	u64 data0, data1 = 0, steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);

	flash_version->major =
	flash_version->minor =
	flash_version->build =
		flash_version->build);

	u64 data0, data1 = 0, steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);

	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);

	data1 = steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);

	u64 data0, data1 = 0, steer_ctrl = 0;

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);

		data0 = 0, data1 = 0, steer_ctrl = 0;

		status = vxge_hw_vpath_fw_api(vpath, action,
					      0, &data0, &data1, &steer_ctrl);

		macaddr[i - 1] = (u8)(data0 & 0xFF);

		macaddr_mask[i - 1] = (u8)(data1 & 0xFF);

		data0 = 0, data1 = 0, steer_ctrl = 0;
	} while (!is_valid_ether_addr(macaddr));
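/*
 * The do/while above keeps querying entries until a usable address comes
 * back: is_valid_ether_addr() (from <linux/etherdevice.h>) rejects the
 * all-zero address and multicast/broadcast patterns, so invalid DA-table
 * entries are skipped instead of being handed to the caller.
 */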
	toc = __vxge_hw_device_toc_get(bar0);

	common_reg = bar0 + val64;

	status = __vxge_hw_device_vpath_reset_in_prog_check(

	vpmgmt_reg = bar0 + val64;

	hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
	if (__vxge_hw_device_access_rights_get(hw_info->host_type,

	mrpcim_reg = bar0 + val64;

	vpath.vp_reg = bar0 + val64;

	status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);

	status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);

	status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);

	vpath.vp_reg = bar0 + val64;

	status = __vxge_hw_vpath_addr_get(&vpath,

	if (blockpool == NULL) {

	hldev = blockpool->hldev;

	pci_unmap_single(hldev->pdev,

	vxge_os_dma_free(hldev->pdev,

	if (blockpool == NULL) {
		goto blockpool_create_exit;

	blockpool->hldev = hldev;

	for (i = 0; i < pool_size + pool_max; i++) {
		if (entry == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			goto blockpool_create_exit;

	for (i = 0; i < pool_size; i++) {
		memblock = vxge_os_dma_malloc(
		if (memblock == NULL) {
			__vxge_hw_blockpool_destroy(blockpool);
			goto blockpool_create_exit;

		dma_addr = pci_map_single(hldev->pdev, memblock,
			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
			__vxge_hw_blockpool_destroy(blockpool);
			goto blockpool_create_exit;

		if (entry != NULL) {
			list_add(&entry->item,
			__vxge_hw_blockpool_destroy(blockpool);
			goto blockpool_create_exit;

blockpool_create_exit:
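/*
 * Hedged sketch of the unwind pattern above (the entry allocation itself
 * is not shown in this excerpt, so its details are assumed): every failure
 * while populating the pool tears down whatever was built and funnels
 * through the single exit label, so a partially constructed pool is never
 * returned:
 *
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *	if (entry == NULL) {
 *		__vxge_hw_blockpool_destroy(blockpool);
 *		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 *		goto blockpool_create_exit;
 *	}
 */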
	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);

	status = __vxge_hw_device_vpath_config_check(

	status = __vxge_hw_device_config_check(device_config);

	if (hldev == NULL) {

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);

	__vxge_hw_device_host_info_get(hldev);

		if (device_config->vp_config[i].ring.enable ==
			nblocks += device_config->vp_config[i].ring.ring_blocks;

		if (device_config->vp_config[i].fifo.enable ==
			nblocks += device_config->vp_config[i].fifo.fifo_blocks;

	if (__vxge_hw_blockpool_create(hldev,

	status = __vxge_hw_device_initialize(hldev);

	__vxge_hw_blockpool_destroy(&hldev->block_pool);

		goto vpath_stats_access_exit;

	status = __vxge_hw_pio_mem_write64(val64,
			vpath->hldev->config.device_poll_millis);

vpath_stats_access_exit:

	val64 = (u64 *)vpath_tx_stats;

	status = __vxge_hw_vpath_stats_access(vpath,

	val64 = (u64 *)vpath_rx_stats;

	status = __vxge_hw_vpath_stats_access(vpath,
			offset >> 3, val64);

	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);

	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);

	status = __vxge_hw_vpath_stats_get(

	status = __vxge_hw_pio_mem_write64(val64,
			hldev->config.device_poll_millis);

	val64 = (u64 *)aggr_stats;

			((offset + (104 * port)) >> 3), val64);

	val64 = (u64 *)port_stats;

			((offset + (608 * port)) >> 3), val64);

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,

	status = vxge_hw_device_xmac_aggr_stats_get(hldev,

	status = vxge_hw_device_xmac_port_stats_get(hldev,

	status = __vxge_hw_vpath_xmac_tx_stats_get(

	status = __vxge_hw_vpath_xmac_rx_stats_get(

#if defined(VXGE_DEBUG_TRACE_MASK) || \
	defined(VXGE_DEBUG_ERR_MASK)

#if defined(VXGE_DEBUG_ERR_MASK)

#if defined(VXGE_DEBUG_TRACE_MASK)

#if defined(VXGE_DEBUG_ERR_MASK)

#if defined(VXGE_DEBUG_TRACE_MASK)
__vxge_hw_ring_block_memblock_idx(u8 *block)

__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)

__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)

	dma_object = ring->mempool->memblocks_dma_arr;

	return dma_object->addr;

	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

	dma_item_offset = (u8 *)item - (u8 *)memblock;

	return memblock_dma_object->addr + dma_item_offset;

static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,

	u8 *to_item, *from_item;

	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);

	void *rxdblock_priv;

	u32 reserve_index = ring->channel.reserve_ptr -
	u32 memblock_item_idx;

	ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +

	rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
						    memblock_index, item,
						    &memblock_item_idx);

	rxdp = ring->channel.reserve_arr[reserve_index];

	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

	__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);

	__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
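/*
 * Linking order implied by the two calls above: while the ring mempool
 * grows, each freshly added RxD block is first pointed at block 0 (keeping
 * the list circular at every step), and the previously last block is then
 * re-pointed at the new block.  The "next" DMA pointer of the final block
 * therefore always wraps around to the first one.
 */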
	hldev = vph->vpath->hldev;
	vp_id = vph->vpath->vp_id;

	if (channel == NULL)

	INIT_LIST_HEAD(&channel->item);

	channel->devh = hldev;

	__vxge_hw_channel_free(channel);

	if (block_addr == NULL) {

	dma_addr = pci_map_single(devh->pdev, block_addr, length,

	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);

	if (entry != NULL) {

vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)

	vaddr = kmalloc(size, flags);

	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);

	for (i = 0; i < nreq; i++)
		vxge_os_dma_malloc_async(
			(blockpool->hldev)->pdev,

	void *memblock = NULL;

	memblock = vxge_os_dma_malloc(devh->pdev, size,

	if (memblock == NULL) {

	dma_object->addr = pci_map_single(devh->pdev, memblock, size,

		dma_object->addr))) {
		vxge_os_dma_free(devh->pdev, memblock,

	if (entry != NULL) {

		list_add(&entry->item,

	if (memblock != NULL)
		__vxge_hw_blockpool_blocks_add(blockpool);

		(blockpool->hldev)->pdev,
		(blockpool->hldev)->pdev,

	void *memblock, u32 size,

	pci_unmap_single(devh->pdev, dma_object->addr, size,

	if (entry != NULL) {

		list_add(&entry->item,

	__vxge_hw_blockpool_blocks_remove(blockpool);
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)

	for (i = start_block_idx; i < end_block_idx; i++) {
		u32 is_last = ((end_block_idx - 1) == i);

		__vxge_hw_blockpool_malloc(mempool->devh,

		for (j = 0; j < n_items; j++) {
			u32 index = i * n_items + j;

				((char *)the_memblock + j * mempool->item_size);

				dma_object, index, is_last);

	u32 memblocks_to_allocate;

	if (memblock_size < item_size) {

	if (mempool == NULL) {

		__vxge_hw_mempool_destroy(mempool);

		__vxge_hw_mempool_destroy(mempool);

		__vxge_hw_mempool_destroy(mempool);

		__vxge_hw_mempool_destroy(mempool);

	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,

		__vxge_hw_mempool_destroy(mempool);
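/*
 * Failure policy sketch, consistent with the destroy calls above (the
 * argument list of __vxge_hw_mempool_grow is abbreviated in this excerpt):
 * __vxge_hw_mempool_create() allocates its tracking arrays, then grows the
 * pool to the initial block count; any failure tears the half-built pool
 * down rather than returning it partially filled:
 *
 *	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
 *					&allocated);
 *	if (status != VXGE_HW_OK) {
 *		__vxge_hw_mempool_destroy(mempool);
 *		mempool = NULL;
 *	}
 *
 * '&allocated' is an assumed name for the elided third argument.
 */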
	__vxge_hw_ring_abort(ring);

	status = __vxge_hw_channel_reset(channel);

	__vxge_hw_ring_abort(ring);

	__vxge_hw_mempool_destroy(ring->mempool);

	__vxge_hw_channel_free(&ring->channel);

	if ((vp == NULL) || (attr == NULL)) {

	hldev = vp->vpath->hldev;
	vp_id = vp->vpath->vp_id;

	config = &hldev->config.vp_config[vp_id].ring;

		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	vp->vpath->ringh = ring;

	ring->stats = &vp->vpath->sw_stats->ring_stats;

		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

	ring->mempool = __vxge_hw_mempool_create(hldev,
			ring->config->ring_blocks,
			ring->config->ring_blocks,

		__vxge_hw_ring_delete(vp);

	status = __vxge_hw_channel_initialize(&ring->channel);
		__vxge_hw_ring_delete(vp);

		__vxge_hw_ring_delete(vp);

	ring->stats->common_stats.usage_cnt = 0;
		device_config->vp_config[i].ring.ring_blocks =
		device_config->vp_config[i].ring.buffer_mode =
		device_config->vp_config[i].ring.scatter_mode =
		device_config->vp_config[i].ring.rxds_limit =

		device_config->vp_config[i].fifo.fifo_blocks =
		device_config->vp_config[i].fifo.memblock_size =
		device_config->vp_config[i].fifo.alignment_size =
		device_config->vp_config[i].fifo.no_snoop_bits =

		device_config->vp_config[i].tti.intr_enable =
		device_config->vp_config[i].tti.timer_ac_en =
		device_config->vp_config[i].tti.timer_ci_en =
		device_config->vp_config[i].tti.timer_ri_en =

		device_config->vp_config[i].rti.intr_enable =
		device_config->vp_config[i].rti.timer_ac_en =
		device_config->vp_config[i].rti.timer_ci_en =
		device_config->vp_config[i].rti.timer_ri_en =

		device_config->vp_config[i].rpa_strip_vlan_tag =
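/*
 * All of the per-vpath defaults above follow one pattern: each
 * device_config->vp_config[i] sub-structure (ring, fifo, tti, rti, ...)
 * is seeded with a corresponding default constant (the right-hand sides
 * are truncated in this excerpt), so callers only override the fields
 * they care about before handing the config to device initialization.
 */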
#ifndef __BIG_ENDIAN
	val64 = readq(&vpath_reg->vpath_general_cfg1);
	writeq(val64, &vpath_reg->vpath_general_cfg1);

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	val64 = readq(&vpath_reg->kdfcctl_cfg0);
	writeq(val64, &vpath_reg->kdfcctl_cfg0);

	if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {

	if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||

	if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||

	if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {

	if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {

	if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||

	if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
	__vxge_hw_fifo_abort(fifo);

	status = __vxge_hw_channel_reset(&fifo->channel);

	__vxge_hw_fifo_abort(fifo);

	__vxge_hw_mempool_destroy(fifo->mempool);

	__vxge_hw_channel_free(&fifo->channel);

__vxge_hw_fifo_mempool_item_alloc(

	u32 memblock_item_idx;

	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
				    &memblock_item_idx);

	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;

	u32 txdl_size, txdl_per_memblock;

	if ((vp == NULL) || (attr == NULL)) {

	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;

	vpath->fifoh = fifo;

	fifo->stats = &vpath->sw_stats->fifo_stats;

		__vxge_hw_fifo_delete(vp);

	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

		__vxge_hw_mempool_create(vpath->hldev,
					 fifo->config->memblock_size,

		__vxge_hw_fifo_delete(vp);

	status = __vxge_hw_channel_initialize(&fifo->channel);
		__vxge_hw_fifo_delete(vp);
	status = __vxge_hw_device_register_poll(

	u64 data0, data1 = 0, steer_ctrl = 0;

	if (hldev == NULL) {

	status = vxge_hw_vpath_fw_api(vpath,
				      0, &data0, &data1, &steer_ctrl);

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      data0, data1, &steer_ctrl);

			      u32 rts_table, u32 offset, u64 steer_data0,

	u64 data0, data1 = 0, steer_ctrl = 0;

	data0 = steer_data0;

	data1 = steer_data1;

	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
				      &data0, &data1, &steer_ctrl);
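/*
 * Usage sketch for the two RTS-table helpers whose bodies appear above
 * (argument names as visible here; the wrapping function names are
 * assumed from context): a "get" passes data0/data1 by pointer so the
 * firmware reply can be returned, while a "set" passes the values in and
 * lets fw_api clobber local copies:
 *
 *	status = __vxge_hw_vpath_rts_table_get(vp, action, rts_table,
 *					       offset, &data0, &data1);
 *	...
 *	status = __vxge_hw_vpath_rts_table_set(vp, action, rts_table,
 *					       offset, data0, data1);
 */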
vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,

	max_entries = (((u32)1) << itable_size);

	if (vp->vpath->hldev->config.rth_it_type

			action, rts_table, j, data0, data1);

			vpath_handles[mtable[itable[j]]], action,
			rts_table, j, data0, data1);

	for (i = 0; i < vpath_count; i++) {

		while (j < max_entries) {
			if (mtable[itable[j]] != i) {

			vxge_hw_rts_rth_data0_data1_get(j,
					&data0, &data1, 1, itable);

		while (j < max_entries) {
			if (mtable[itable[j]] != i) {

			vxge_hw_rts_rth_data0_data1_get(j,
					&data0, &data1, 2, itable);

		while (j < max_entries) {
			if (mtable[itable[j]] != i) {

			vxge_hw_rts_rth_data0_data1_get(j,
					&data0, &data1, 3, itable);

		while (j < max_entries) {
			if (mtable[itable[j]] != i) {

			vxge_hw_rts_rth_data0_data1_get(j,
					&data0, &data1, 4, itable);
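/*
 * What the four near-identical loops above appear to do (inferred from
 * the bucket number 1..4 passed to vxge_hw_rts_rth_data0_data1_get): for
 * each vpath i, consecutive indirection-table slots owned by that vpath
 * are packed into data0/data1 one bucket position at a time, so a single
 * firmware write can program up to four RTH entries together.
 */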
	u64 rxd_new_count, rxd_spat;

	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);

	if (rxd_new_count >= rxd_spat)

__vxge_hw_vpath_mgmt_read(

	u32 i, mtu = 0, max_pyld = 0;

			rxmac_cfg0_port_vpmgmt_clone[i]);

	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			1 << (16 - vpath->vp_id)),
			vpath->hldev->config.device_poll_millis);

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),

	status = __vxge_hw_ring_reset(vpath->ringh);

	status = __vxge_hw_fifo_reset(vpath->fifoh);

	if (vpath->vp_config->ring.scatter_mode !=

	switch (vpath->vp_config->ring.scatter_mode) {

		__vxge_hw_ring_first_block_address_get(

	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

		((vpath->vp_config->fifo.memblock_size /

#ifndef __BIG_ENDIAN

	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

		(hldev->kdfc + (vp_id *

	if (vp_config->mtu !=

		if ((vp_config->mtu +
			VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)

				VXGE_HW_MAC_HEADER_MAX_SIZE);
	if (hldev->config.rth_it_type ==

			config->tti.btimer_val);

		if (config->tti.timer_ac_en)

		if (config->tti.timer_ci_en)

			config->tti.urange_a);
			config->tti.urange_b);
			config->tti.urange_c);

		if (config->tti.timer_ri_en)

			config->tti.rtimer_val);
			config->tti.ltimer_val);

			config->rti.btimer_val);

		if (config->rti.timer_ac_en)

		if (config->rti.timer_ci_en)

			config->rti.urange_a);
			config->rti.urange_b);
			config->rti.urange_c);

		if (config->rti.timer_ri_en)

			config->rti.rtimer_val);
			config->rti.ltimer_val);

	status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);

	status = __vxge_hw_vpath_mac_configure(hldev, vp_id);

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);

	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);

	spin_lock(&vpath->lock);
	spin_unlock(&vpath->lock);

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);

	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
		__vxge_hw_vp_terminate(hldev, vp_id);

	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);

	if (entry != NULL) {

	__vxge_hw_blockpool_blocks_add(blockpool);
		goto vpath_open_exit1;

	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
		goto vpath_open_exit1;

		goto vpath_open_exit2;

	status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
		goto vpath_open_exit6;

	status = __vxge_hw_ring_create(vp, &attr->ring_attr);
		goto vpath_open_exit7;

	__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);

	vpath->fifoh->tx_intr_num =

	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
		goto vpath_open_exit8;

	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];

	status = vxge_hw_vpath_stats_enable(vp);
		goto vpath_open_exit8;

	__vxge_hw_ring_delete(vp);

	__vxge_hw_fifo_delete(vp);

	__vxge_hw_vp_terminate(hldev, attr->vp_id);

	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
	u64 new_count, val64, val164;

	new_count &= 0x1fff;

		&vpath->vp_reg->prc_rxd_doorbell);

	new_count -= (val64 + 1);
	val64 = min(val164, new_count) / 4;
	__vxge_hw_blockpool_blocks_remove(blockpool);

	devh = vpath->hldev;

		goto vpath_close_exit;

		goto vpath_close_exit;

	__vxge_hw_ring_delete(vp);

	__vxge_hw_fifo_delete(vp);

	__vxge_hw_blockpool_block_free(devh, vpath->stats_block);

	__vxge_hw_vp_terminate(devh, vp_id);

	vp_id = vpath->vp_id;

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);

	vp_id = vp->vpath->vp_id;

	hldev = vpath->hldev;

	status = __vxge_hw_vpath_reset_check(vpath);

	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);

	__vxge_hw_vpath_prc_configure(hldev, vp_id);

		&vpath->vp_reg->stats_cfg);

	status = vxge_hw_vpath_stats_enable(vp);

	hldev = vp->vpath->hldev;

		1 << (16 - vp->vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),