/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/io.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
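/*
 * Note: VXGE_ALIGN(adrs, size) evaluates to the number of bytes needed to
 * round 'adrs' up to the next 'size'-byte boundary (0 if already aligned),
 * not to the aligned address itself. Illustrative example:
 *
 *	u64 pad = VXGE_ALIGN(0x1001ULL, VXGE_CACHE_LINE_SIZE);
 *	// pad == 0x7f, since 0x1001 + 0x7f == 0x1080, a 128-byte boundary
 */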

#define VXGE_HW_MIN_MTU 68
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500

#define VXGE_HW_MAX_ROM_IMAGES 8
struct eprom_image {
	u8 is_valid:1;
	u8 index;
	u8 type;
	u16 version;
};

#ifdef VXGE_DEBUG_ASSERT
/**
 * vxge_assert
 * @test: C-condition to check
 *
 * This macro implements a traditional assert. Assertions are enabled when
 * VXGE_DEBUG_ASSERT is defined at compile time and compile away otherwise.
 */
#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */

/**
 * enum vxge_debug_level
 * @VXGE_NONE: debug disabled
 * @VXGE_TRACE: all errors plus verbose tracing printouts. Very noisy.
 * @VXGE_ERR: all errors are logged
 *
 * Used to switch between debug verbosity levels at runtime.
 */
enum vxge_debug_level {
	VXGE_NONE = 0,
	VXGE_TRACE = 1,
	VXGE_ERR = 2,
};

#define NULL_VPID 0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK 0xffffffff
#define VXGE_DEBUG_TRACE_MASK 0xffffffff
#define VXGE_DEBUG_ERR_MASK 0xffffffff
#define VXGE_DEBUG_MASK 0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK 0x20000000
#define VXGE_DEBUG_TRACE_MASK 0x20000000
#define VXGE_DEBUG_ERR_MASK 0x20000000
#define VXGE_DEBUG_MASK 0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * These masks are used to distinguish modules or libraries during
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
 */
#define VXGE_COMPONENT_LL 0x20000000
#define VXGE_COMPONENT_ALL 0xffffffff

#define VXGE_HW_BASE_INF 100
#define VXGE_HW_BASE_ERR 200
#define VXGE_HW_BASE_BADCFG 300

};

enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

};

};

#define VXGE_HW_FW_STRLEN 32

struct vxge_hw_device_date {
	u32	day;
	u32	month;
	u32	year;
	char	date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32	major;
	u32	minor;
	u32	build;
	char	version[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_fifo_config {
	u32	enabled;
#define VXGE_HW_FIFO_ENABLE 1
#define VXGE_HW_FIFO_DISABLE 0

	u32	fifo_blocks;
#define VXGE_HW_MIN_FIFO_BLOCKS 2
#define VXGE_HW_MAX_FIFO_BLOCKS 128

	u32	max_frags;
#define VXGE_HW_MIN_FIFO_FRAGS 1
#define VXGE_HW_MAX_FIFO_FRAGS 256

	u32	memblock_size;
#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096

	u32	alignment_size;
#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE

	u32	intr;
#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0

	u32	no_snoop_bits;
#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0

};
struct vxge_hw_ring_config {
	u32	enabled;
#define VXGE_HW_RING_ENABLE 1
#define VXGE_HW_RING_DISABLE 0
#define VXGE_HW_RING_DEFAULT 1

	u32	ring_blocks;
#define VXGE_HW_MIN_RING_BLOCKS 1
#define VXGE_HW_MAX_RING_BLOCKS 128
#define VXGE_HW_DEF_RING_BLOCKS 2

	u32	buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1

	u32	scatter_mode;
#define VXGE_HW_RING_SCATTER_MODE_A 0
#define VXGE_HW_RING_SCATTER_MODE_B 1
#define VXGE_HW_RING_SCATTER_MODE_C 2
#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff

	u64	rxds_limit;
#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
};

struct vxge_hw_vp_config {
	u32	vp_id;

#define VXGE_HW_VPATH_PRIORITY_MIN 0
#define VXGE_HW_VPATH_PRIORITY_MAX 16
#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0

	u32	min_bandwidth;
#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0

	struct vxge_hw_ring_config	ring;
	struct vxge_hw_fifo_config	fifo;
	struct vxge_hw_tim_intr_config	tti;
	struct vxge_hw_tim_intr_config	rti;

	u32	mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff

	u32	rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff

};

struct vxge_hw_device_config {
	u32	device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000

	u32	dma_blockpool_initial;
	u32	dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096

#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2

	u32	intr_mode:2,
#define VXGE_HW_INTR_MODE_IRQLINE 0
#define VXGE_HW_INTR_MODE_MSIX 1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2

#define VXGE_HW_INTR_MODE_DEF 0

		rth_en:1,
#define VXGE_HW_RTH_DISABLE 0
#define VXGE_HW_RTH_ENABLE 1
#define VXGE_HW_RTH_DEFAULT 0

		rth_it_type:1,
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0

		rts_mac_en:1,
#define VXGE_HW_RTS_MAC_DISABLE 0
#define VXGE_HW_RTS_MAC_ENABLE 1
#define VXGE_HW_RTS_MAC_DEFAULT 0

		hwts_en:1;
#define VXGE_HW_HWTS_DISABLE 0
#define VXGE_HW_HWTS_ENABLE 1
#define VXGE_HW_HWTS_DEFAULT 1

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
};

struct vxge_hw_uld_cbs {
	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};

/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
 * @acc_handle: DMA acc handle
 *
 * Each block is allocated with a header so that blocks can be linked
 * into a list.
 */
struct __vxge_hw_blockpool_entry {
	struct list_head	item;
	u32			length;
	void			*memblock;
	dma_addr_t		dma_addr;
	struct pci_dev		*dma_handle;
	struct pci_dev		*acc_handle;
};

/*
 * struct __vxge_hw_blockpool - Block Pool
 * @hldev: HW device
 * @block_size: size of each block.
 * @pool_size: Number of blocks in the pool
 * @pool_max: Maximum number of blocks above which to free additional blocks
 * @req_out: Number of block requests outstanding with the OS
 * @free_block_list: List of free blocks
 *
 * Block pool contains the DMA blocks preallocated.
 */
struct __vxge_hw_blockpool {
	struct __vxge_hw_device	*hldev;
	u32			block_size;
	u32			pool_size;
	u32			pool_max;
	u32			req_out;
	struct list_head	free_block_list;
	struct list_head	free_entry_list;
};

/*
 * enum __vxge_hw_channel_type - Enumerated channel types.
 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
 * (and recognized) channel types. Currently: 2.
 *
 * Enumerated channel types. Currently there are only two link-layer
 * channels - Titan fifo and Titan ring. In the future the list will grow.
 */
enum __vxge_hw_channel_type {
	VXGE_HW_CHANNEL_TYPE_UNKNOWN	= 0,
	VXGE_HW_CHANNEL_TYPE_FIFO	= 1,
	VXGE_HW_CHANNEL_TYPE_RING	= 2,
	VXGE_HW_CHANNEL_TYPE_MAX	= 3
};

/*
 * struct __vxge_hw_channel
 * @item: List item; used to maintain a list of open channels.
 * @type: Channel type. See enum vxge_hw_channel_type{}.
 * @devh: Device handle. HW device object that contains _this_ channel.
 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
 * @length: Channel length. Currently allocated number of descriptors.
 * The channel length "grows" when more descriptors get allocated.
 * See _hw_mempool_grow.
 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
 * by driver for the subsequent send or receive operation.
 * See vxge_hw_fifo_txdl_reserve(),
 * vxge_hw_ring_rxd_reserve().
 * @reserve_ptr: Current pointer in the reserve array
 * @reserve_top: Reserve top gives the maximum number of dtrs available in
 * reserve array.
 * @work_arr: Work array. Contains descriptors posted to the channel.
 * Note that at any point in time @work_arr contains 3 types of
 * descriptors:
 * 1) posted but not yet consumed by Titan device;
 * 2) consumed but not yet completed;
 * 3) completed but not yet freed
 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
 * @post_index: Post index. At any point in time points to the
 * position in the channel that will contain the next to-be-posted
 * descriptor.
 * @compl_index: Completion index. At any point in time points to the
 * position in the channel that will contain the next
 * to-be-completed descriptor.
 * @free_arr: Free array. Contains completed descriptors that were freed
 * (i.e., handed over back to HW) by driver.
 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
 * @free_ptr: Current pointer in the free array
 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
 * to store per-operation control information.
 * @stats: Pointer to common statistics
 * @userdata: Per-channel opaque (void*) user-defined context, which may be
 * driver object, ULP connection, etc.
 * Once channel is open, @userdata is passed back to user via
 * vxge_hw_channel_callback_f.
 *
 * HW channel object.
 *
 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
 */
struct __vxge_hw_channel {
	struct list_head		item;
	enum __vxge_hw_channel_type	type;
	struct __vxge_hw_device		*devh;
	struct __vxge_hw_vpath_handle	*vph;
	u32				length;
	u32				vp_id;
	void				**reserve_arr;
	u32				reserve_ptr;
	u32				reserve_top;
	void				**work_arr;
	u32				post_index;
	u32				compl_index;
	void				**free_arr;
	u32				free_ptr;
	void				**orig_arr;
	u32				per_dtr_space;
	void				*userdata;
	struct vxge_hw_common_reg	__iomem *common_reg;
	u32				first_vp_id;
	struct vxge_hw_vpath_stats_sw_common_info *stats;
} ____cacheline_aligned;

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * @vp_id: Virtual path id
 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
 * @hldev: Hal device
 * @vp_config: Virtual Path Config
 * @vp_reg: VPATH Register map address in BAR0
 * @vpmgmt_reg: VPATH_MGMT register map address
 * @max_mtu: Maximum MTU that can be supported
 * @vsport_number: vsport attached to this vpath
 * @max_kdfc_db: Maximum kernel mode doorbells
 * @max_nofl_db: Maximum non offload doorbells
 * @tx_intr_num: Interrupt Number associated with the TX
 *
 * @ringh: Ring Queue
 * @fifoh: FIFO Queue
 * @vpath_handles: Virtual Path handles list
 * @stats_block: Memory for DMAing stats
 * @stats: Vpath statistics
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32				vp_id;

	u32				vp_open;
#define VXGE_HW_VP_NOT_OPEN 0
#define VXGE_HW_VP_OPEN 1

	struct __vxge_hw_device		*hldev;
	struct vxge_hw_vp_config	*vp_config;
	struct vxge_hw_vpath_reg	__iomem *vp_reg;
	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;

	u32				max_mtu;
	u32				vsport_number;
	u32				max_kdfc_db;
	u32				max_nofl_db;
	u32				tx_intr_num;

	struct __vxge_hw_ring		*ringh;
	struct __vxge_hw_fifo		*fifoh;
	struct list_head		vpath_handles;
	struct __vxge_hw_blockpool_entry *stats_block;
	struct vxge_hw_vpath_stats_hw_info *hw_stats;
	struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	spinlock_t			lock;
};

/*
 * struct __vxge_hw_vpath_handle - List item to store callback information
 * @item: List head to keep the item in linked list
 * @vpath: Virtual path to which this item belongs
 *
 * This structure is used to store the callback information.
 */
struct __vxge_hw_vpath_handle {
	struct list_head		item;
	struct __vxge_hw_virtualpath	*vpath;
};

/*
 * struct __vxge_hw_device
 *
 * HW device object.
 */
struct __vxge_hw_device {
	u32				magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
	void __iomem			*bar0;
	struct pci_dev			*pdev;
	struct net_device		*ndev;
	struct vxge_hw_device_config	config;

	enum vxge_hw_device_link_state	link_state;
	struct vxge_hw_uld_cbs		uld_callbacks;

	u32				host_type;
	u32				func_id;
	u32				access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4

	struct __vxge_hw_virtualpath	virtual_paths[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64				vpath_assignments;
	u64				vpaths_deployed;
	u32				first_vp_id;
	u64				tim_int_mask0[4];
	u32				tim_int_mask1[4];

	struct __vxge_hw_blockpool	block_pool;
	struct vxge_hw_device_stats	stats;
	u32				debug_module_mask;
	u32				debug_level;
	u32				level_err;
	u32				level_trace;
	u16				eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
};

#define VXGE_HW_INFO_LEN 64

struct vxge_hw_device_hw_info {
	u32	host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
#define VXGE_HW_SR_VH_FUNCTION0 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7

	u64	function_mode;
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10

	u32	func_id;
	u64	vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date flash_date;
	u8	serial_number[VXGE_HW_INFO_LEN];
	u8	part_number[VXGE_HW_INFO_LEN];
	u8	product_desc[VXGE_HW_INFO_LEN];
	u8	(mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8	(mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

struct vxge_hw_device_attr {
	void __iomem		*bar0;
	struct pci_dev		*pdev;
	struct vxge_hw_uld_cbs	uld_callbacks;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
	}						\
	else {						\
		m1[0] = 0x80000000;			\
		m1[1] = 0x40000000;			\
	}						\
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
	}						\
	else {						\
		m1[0] = 0;				\
		m1[1] = 0;				\
	}						\
}
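/*
 * Illustrative sketch of the interrupt-mask helpers above: for interrupt
 * numbers below 16 the SET macro ORs code 0x8 (resp. 0x4) into the i-th
 * 4-bit field of m0[0] (resp. m0[1]), with fields counted from the most
 * significant bit as vxge_vBIT() does; numbers 16 and up collapse onto a
 * single fixed bit in m1[]. The array types below are assumptions of this
 * sketch:
 *
 *	u64 m0[2] = {0, 0};
 *	u32 m1[2] = {0, 0};
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, 2);
 *	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, 2);	// undoes the SET
 */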

#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) {		\
	status = vxge_hw_mrpcim_stats_access(hldev,		\
				VXGE_HW_STATS_OP_READ,		\
				loc,				\
				offset,				\
				&val64);			\
	if (status != VXGE_HW_OK)				\
		return status;					\
}
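/*
 * Note: VXGE_HW_DEVICE_STATS_PIO_READ() expands to code that uses the
 * caller's local variables 'status', 'val64' and 'hldev', and returns from
 * the enclosing function on failure. A minimal usage sketch ('loc' and
 * 'offset' are caller-chosen stats location/offset values):
 *
 *	enum vxge_hw_status status;
 *	u64 val64;
 *
 *	VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset);
 *	// on success, val64 holds the counter fetched through
 *	// vxge_hw_mrpcim_stats_access()
 */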

/*
 * struct __vxge_hw_ring - Ring channel.
 * @channel: Channel "base" of this ring, the common part of all HW
 * channels.
 * @mempool: Memory pool, the pool from which descriptors get allocated.
 * (See vxge_hw_mm.h).
 * @config: Ring configuration, part of device configuration
 * (see struct vxge_hw_device_config{}).
 * @ring_length: Length of the ring
 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
 * as per Titan User Guide.
 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
 * 1-buffer mode descriptor is 32 byte long, etc.
 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
 * per-descriptor data (e.g., DMA handle for Solaris)
 * @per_rxd_space: Per rxd space requested by driver
 * @rxds_per_block: Number of descriptors per hardware-defined RxD
 * block. Depends on the (1-, 3-, 5-) buffer mode.
 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
 * usage. Not to be confused with @rxd_priv_size.
 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
 * @callback: Channel completion callback. HW invokes the callback when there
 * are new completions on that channel. In many implementations
 * the @callback executes in the hw interrupt context.
 * @rxd_init: Channel's descriptor-initialize callback.
 * See vxge_hw_ring_rxd_init_f{}.
 * If not NULL, HW invokes the callback when opening
 * the ring.
 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding channel.
 * See also vxge_hw_channel_rxd_term_f{}.
 * @stats: Statistics for ring
 *
 * Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 * CPU cache performance.
 */
struct __vxge_hw_ring {
	struct __vxge_hw_channel	channel;
	struct vxge_hw_mempool		*mempool;
	struct vxge_hw_ring_config	*config;
	u32				ring_length;
	u32				buffer_mode;
	u32				rxd_size;
	u32				rxd_priv_size;
	u32				per_rxd_space;
	u32				rxds_per_block;
	u32				rxdblock_priv_size;
	u32				cmpl_cnt;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_ring_info *stats;
};
/*
 * struct __vxge_hw_fifo - Fifo.
 * @channel: Channel "base" of this fifo, the common part of all HW
 * channels.
 * @mempool: Memory pool, from which descriptors get allocated.
 * @config: Fifo configuration, part of device configuration
 * (see struct vxge_hw_device_config{}).
 * @interrupt_type: Interrupt type to be used
 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 * For more details on TxDLs please refer to the Titan UG.
 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
 * @priv_size: Per-Tx descriptor space reserved for driver
 * usage.
 * @per_txdl_space: Per txdl private space for the driver
 * @callback: Fifo completion callback. HW invokes the callback when there
 * are new completions on that fifo. In many implementations
 * the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding fifo.
 * See also vxge_hw_fifo_txdl_term_f{}.
 * @stats: Statistics of this fifo
 *
 * Fifo channel.
 * Note: The structure is cache line aligned.
 */
struct __vxge_hw_fifo {
	struct __vxge_hw_channel	channel;
	struct vxge_hw_mempool		*mempool;
	struct vxge_hw_fifo_config	*config;
	u32				interrupt_type;
	u32				no_snoop_bits;
	u32				txdl_per_memblock;
	u32				txdl_size;
	u32				priv_size;
	u32				per_txdl_space;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb,
			int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_fifo_info *stats;
};
/*
 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
 * @dma_addr: DMA (mapped) address of _this_ descriptor.
 * @dma_handle: DMA handle used to map the descriptor onto device.
 * @dma_offset: Descriptor's offset in the memory block. HW allocates
 * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
 * Each memblock is a contiguous block of DMA-able memory.
 * @frags: Total number of fragments (that is, contiguous data buffers)
 * carried by this TxDL.
 * @align_vaddr_start: Aligned virtual address start
 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
 * alignment. Used to place one or more mis-aligned fragments
 * @align_dma_addr: DMA address translated from the @align_vaddr.
 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
 * @align_dma_offset: The current offset into the @align_vaddr area.
 * Grows while filling the descriptor, gets reset.
 * @align_used_frags: Number of fragments used.
 * @alloc_frags: Total number of fragments allocated.
 * @unused: TODO
 * @next_txdl_priv: (TODO).
 * @first_txdp: (TODO).
 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
 * TxDL list.
 * @txdlh: Corresponding txdlh to this TxDL.
 * @memblock: Pointer to the TxDL memory block or memory page.
 * @dma_object: DMA address and handle of the memory block that contains
 * the descriptor. This member is used only in the "checked"
 * version of the HW (to enforce certain assertions);
 * otherwise it gets compiled out.
 * @allocated: True if the descriptor is reserved, false otherwise.
 * Internal usage.
 *
 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
 * information associated with the descriptor. Note that driver can ask HW
 * to allocate additional per-descriptor space for its own (driver-specific)
 * purposes.
 *
 * See also: struct vxge_hw_ring_rxd_priv{}.
 */
struct __vxge_hw_fifo_txdl_priv {
	dma_addr_t		dma_addr;
	struct pci_dev		*dma_handle;
	ptrdiff_t		dma_offset;
	u32			frags;
	u8			*align_vaddr_start;
	u8			*align_vaddr;
	dma_addr_t		align_dma_addr;
	struct pci_dev		*align_dma_handle;
	struct pci_dev		*align_dma_acch;
	ptrdiff_t		align_dma_offset;
	u32			align_used_frags;
	u32			alloc_frags;
	u32			unused;
	struct __vxge_hw_fifo_txdl_priv	*next_txdl_priv;
	struct vxge_hw_fifo_txd	*first_txdp;
	void			*memblock;
};

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 * Bits 8 to 31 - Reserved.
 * Bits 32 to 39 - The highest TxD in this TxDL.
 * Bits 40 to 47 - Reserved.
 * Bits 48 to 55 - Reserved.
 * Bits 56 to 63 - No snoop flags.
 * @txdl_ptr: The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64		control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW 0

#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1

	u64		txdl_ptr;
};
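/*
 * Illustrative sketch (not part of the driver API): composing a non-offload
 * doorbell for a TxDL whose highest TxD index is 'last_txd'; 'wrapper' and
 * 'txdl_dma' are hypothetical locals:
 *
 *	wrapper->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
 *			VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
 *			VXGE_HW_NODBW_LIST_NO_SNOOP(0);
 *	wrapper->txdl_ptr = txdl_dma;	// DMA address of the TxDL
 */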

/*
 * TX Descriptor
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)

#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)

#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};

struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};

};

struct vxge_hw_ring_rxd_info {
	u32	syn_flag;
	u32	is_icmp;
	u32	fast_path_eligible;
	u32	l3_cksum_valid;
	u32	l3_cksum;
	u32	l4_cksum_valid;
	u32	l4_cksum;
	u32	frame;
	u32	proto;
	u32	is_vlan;
	u32	vlan;
	u32	rth_bucket;
	u32	rth_it_hit;
	u32	rth_spdm_hit;
	u32	rth_hash_type;
	u32	rth_value;
};

void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE - 16) /
			sizeof(struct vxge_hw_ring_rxd_1));
}
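/*
 * Worked example: assuming VXGE_HW_BLOCK_SIZE is 4096 and the 1-buffer RxD
 * (struct vxge_hw_ring_rxd_1, four u64 words) is 32 bytes, each block holds
 * (4096 - 16) / 32 = 127 RxDs; the 16 tail bytes are kept back for the
 * ring's block-linking information rather than for descriptors.
 */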

static inline
void vxge_hw_ring_rxd_1b_set(
	void *rxdh,
	dma_addr_t dma_pointer,
	u32 size)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
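/*
 * A minimal posting sketch (illustrative; 'ring', 'rxdh' and 'skb' are
 * assumed to come from vxge_hw_ring_rxd_reserve() and the netdev receive
 * path, 'pdev' is the adapter's struct pci_dev, and vxge_hw_ring_rxd_post()
 * is declared in vxge-traffic.h):
 *
 *	dma_addr_t dma = pci_map_single(pdev, skb->data, buf_size,
 *					PCI_DMA_FROMDEVICE);
 *	vxge_hw_ring_rxd_1b_set(rxdh, dma, buf_size);
 *	vxge_hw_ring_rxd_post(ring, rxdh);
 */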

static inline
void vxge_hw_ring_rxd_1b_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	u32 *pkt_length)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	*pkt_length =
		(u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
}

static inline
void vxge_hw_ring_rxd_1b_info_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	struct vxge_hw_ring_rxd_info *rxd_info)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxd_info->syn_flag =
		(u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
	rxd_info->is_icmp =
		(u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
	rxd_info->fast_path_eligible =
		(u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
	rxd_info->l3_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l3_cksum =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
	rxd_info->l4_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l4_cksum =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
	rxd_info->frame =
		(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
	rxd_info->proto =
		(u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
	rxd_info->is_vlan =
		(u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
	rxd_info->vlan =
		(u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
	rxd_info->rth_bucket =
		(u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
	rxd_info->rth_it_hit =
		(u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
	rxd_info->rth_spdm_hit =
		(u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
	rxd_info->rth_hash_type =
		(u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
	rxd_info->rth_value =
		(u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
}

static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	return (void *)(size_t)rxdp->host_control;
}

static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= cksum_bits;
}
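/*
 * Usage sketch: enable IPv4 header and TCP checksum offload on a TxDL,
 * using the VXGE_HW_FIFO_TXD_TX_CKO_* bits defined above:
 *
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 */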

static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
}

static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
}

static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	return (void *)(size_t)txdp->host_control;
}

struct vxge_hw_ring_attr {
	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	void		*userdata;
	u32		per_rxd_space;
};

struct vxge_hw_fifo_attr {
	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb, int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	void		*userdata;
	u32		per_txdl_space;
};

struct vxge_hw_vpath_attr {
	u32				vp_id;
	struct vxge_hw_ring_attr	ring_attr;
	struct vxge_hw_fifo_attr	fifo_attr;
};

enum vxge_hw_status vxge_hw_device_hw_info_get(
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status vxge_hw_device_config_default_get(
	struct vxge_hw_device_config *device_config);

static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
	struct __vxge_hw_device *devh)
{
	return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config);

enum vxge_hw_status vxge_hw_device_getpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 *tx,
	u32 *rx);

enum vxge_hw_status vxge_hw_device_setpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 tx,
	u32 rx);

static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
			unsigned long size,
			struct pci_dev **p_dmah,
			struct pci_dev **p_dma_acch)
{
	gfp_t flags;
	void *vaddr;
	unsigned long misaligned = 0;
	int realloc_flag = 0;

	*p_dma_acch = *p_dmah = NULL;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;
realloc:
	vaddr = kmalloc(size, flags);
	if (vaddr == NULL)
		return vaddr;
	misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
				VXGE_CACHE_LINE_SIZE);
	if (realloc_flag)
		goto out;

	if (misaligned) {
		/* misaligned, free current one and try allocating
		 * size + VXGE_CACHE_LINE_SIZE memory
		 */
		kfree(vaddr);
		size += VXGE_CACHE_LINE_SIZE;
		realloc_flag = 1;
		goto realloc;
	}
out:
	*(unsigned long *)p_dma_acch = misaligned;
	vaddr = (void *)((u8 *)vaddr + misaligned);
	return vaddr;
}

static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
			struct pci_dev **p_dma_acch)
{
	unsigned long misaligned = *(unsigned long *)p_dma_acch;
	u8 *tmp = (u8 *)vaddr;

	tmp -= misaligned;
	kfree((void *)tmp);
}
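/*
 * Note on the pair above: vxge_os_dma_malloc() retries a plain kmalloc()
 * with VXGE_CACHE_LINE_SIZE of slack if the first result is misaligned,
 * returns the pointer advanced to the cache-line boundary, and stashes the
 * advance in *p_dma_acch so that vxge_os_dma_free() can recover the
 * original kmalloc() pointer. Illustrative call pair:
 *
 *	struct pci_dev *dmah, *acch;
 *	void *va = vxge_os_dma_malloc(pdev, 4096, &dmah, &acch);
 *	if (va)
 *		vxge_os_dma_free(pdev, va, &acch);
 */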

/*
 * __vxge_hw_mempool_item_priv - return a pointer to the per-item private
 * space
 */
static inline void *
__vxge_hw_mempool_item_priv(
	struct vxge_hw_mempool *mempool,
	u32 memblock_idx,
	void *item,
	u32 *memblock_item_idx)
{
	ptrdiff_t offset;
	void *memblock = mempool->memblocks_arr[memblock_idx];

	offset = (u8 *)item - (u8 *)memblock;
	vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);

	(*memblock_item_idx) = (u32)offset / mempool->item_size;
	vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);

	return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
			(*memblock_item_idx) * mempool->items_priv_size;
}
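/*
 * Worked example (illustrative numbers only): with item_size == 64 and
 * 'item' pointing 256 bytes into memblock 3, offset / item_size yields
 * *memblock_item_idx == 4, and the returned private-space pointer is
 * memblocks_priv_arr[3] + 4 * items_priv_size.
 */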

/*
 * __vxge_hw_fifo_txdl_priv - Return the per-TxDL HW-private data
 * associated with a TxD.
 * @fifo: Fifo
 * @txdp: Pointer to a TxD
 */
static inline struct __vxge_hw_fifo_txdl_priv *
__vxge_hw_fifo_txdl_priv(
	struct __vxge_hw_fifo *fifo,
	struct vxge_hw_fifo_txd *txdp)
{
	return (struct __vxge_hw_fifo_txdl_priv *)
			(((char *)((ulong)txdp->host_control)) +
				fifo->per_txdl_space);
}

enum vxge_hw_status vxge_hw_vpath_open(
	struct __vxge_hw_device *devh,
	struct vxge_hw_vpath_attr *attr,
	struct __vxge_hw_vpath_handle **vpath_handle);

enum vxge_hw_status vxge_hw_vpath_close(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);

enum vxge_hw_status

enum vxge_hw_status vxge_hw_vpath_mtu_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);

#ifndef readq
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;
	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
#endif
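/*
 * Note: these fallbacks are compiled in only when the architecture does not
 * provide native readq/writeq. Each 64-bit access is split into two 32-bit
 * PIOs and is therefore not atomic; readq() fetches the upper word
 * (addr + 4) before the lower one. E.g. on a 32-bit platform:
 *
 *	u64 val64 = readq(addr);	// expands to two readl() calls
 */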

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}
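/*
 * Illustrative use of the helpers above ('reg' is a hypothetical
 * void __iomem * target): writing the two halves independently is
 * equivalent to writeq(val64, reg) under the fallback definition above,
 * but lets either half be updated on its own:
 *
 *	__vxge_hw_pio_mem_write32_lower((u32)val64, reg);
 *	__vxge_hw_pio_mem_write32_upper((u32)(val64 >> 32), reg);
 */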

enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
#define vxge_debug_ll(level, mask, fmt, ...) do {			       \
	if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
	    (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
		if ((mask & VXGE_DEBUG_MASK) == mask)			       \
			printk(fmt "\n", __VA_ARGS__);			       \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
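/*
 * Usage sketch: log an error from the link-layer component; the call
 * compiles away unless VXGE_COMPONENT_LL is set in VXGE_DEBUG_MODULE_MASK
 * and the level/mask gates above pass:
 *
 *	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK, "%s: device reset failed",
 *		__func__);
 */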

enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
	struct __vxge_hw_vpath_handle **vpath_handles,
	u32 vpath_count,
	u8 *mtable,
	u8 *itable,
	u32 itable_size);

enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	enum vxge_hw_rth_algoritms algorithm,
	struct vxge_hw_rth_hash_types *hash_type,
	u16 bucket_size);

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);

#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
#define VXGE_HW_MAX_POLLING_COUNT 100

void
vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			u32 *minor, u32 *build);

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
			int size);

enum vxge_hw_status
vxge_hw_get_eprom_image(struct __vxge_hw_device *hldev,
			struct eprom_image *eprom_image_data);

int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif