Linux Kernel 3.7.1
vxge-traffic.h
1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice. This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  * Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #ifndef VXGE_TRAFFIC_H
15 #define VXGE_TRAFFIC_H
16 
17 #include "vxge-reg.h"
18 #include "vxge-version.h"
19 
20 #define VXGE_HW_DTR_MAX_T_CODE 16
21 #define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
22 #define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
23 #define VXGE_HW_MAX_VIRTUAL_PATHS 17
24 
25 #define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
26 
27 #define VXGE_HW_DEFAULT_32 0xffffffff
28 /* frames sizes */
29 #define VXGE_HW_HEADER_802_2_SIZE 3
30 #define VXGE_HW_HEADER_SNAP_SIZE 5
31 #define VXGE_HW_HEADER_VLAN_SIZE 4
32 #define VXGE_HW_MAC_HEADER_MAX_SIZE \
33  (ETH_HLEN + \
34  VXGE_HW_HEADER_802_2_SIZE + \
35  VXGE_HW_HEADER_VLAN_SIZE + \
36  VXGE_HW_HEADER_SNAP_SIZE)
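
With ETH_HLEN being the standard 14-byte Ethernet header, VXGE_HW_MAC_HEADER_MAX_SIZE works out to 14 + 3 + 4 + 5 = 26 bytes. Below is a minimal, illustrative sketch of how a receive path might size socket-buffer headroom from it; the function name and the allocation flow are assumptions, only the macros come from this header.

/* Illustrative sketch: allow for the largest supported MAC header and the
 * 2-byte IP-header alignment this file defines. */
static struct sk_buff *example_alloc_rx_skb(unsigned int mtu)
{
	unsigned int len = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
	struct sk_buff *skb;

	skb = dev_alloc_skb(len + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	return skb;
}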
37 
38 /* 32bit alignments */
39 #define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
40 #define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
41 #define VXGE_HW_HEADER_802_2_ALIGN 3
42 #define VXGE_HW_HEADER_SNAP_ALIGN 1
43 
44 #define VXGE_HW_L3_CKSUM_OK 0xFFFF
45 #define VXGE_HW_L4_CKSUM_OK 0xFFFF
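
VXGE_HW_L3_CKSUM_OK and VXGE_HW_L4_CKSUM_OK are the values the adapter reports when the IP and TCP/UDP checksums verified correctly. A hedged sketch of how a receive-completion handler might turn them into the stack's checksum hint; the function and parameter names are assumptions, not part of this header.

/* Sketch: claim checksum offload only when the adapter validated both the
 * L3 and L4 checksums; otherwise leave verification to software. */
static void example_set_rx_cksum(struct sk_buff *skb, u16 l3_cksum, u16 l4_cksum)
{
	if (l3_cksum == VXGE_HW_L3_CKSUM_OK && l4_cksum == VXGE_HW_L4_CKSUM_OK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}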
46 
47 /* Forward declarations */
48 struct __vxge_hw_device;
50 struct vxge_hw_vp_config;
52 struct __vxge_hw_channel;
53 struct __vxge_hw_fifo;
54 struct __vxge_hw_ring;
55 struct vxge_hw_ring_attr;
56 struct vxge_hw_mempool;
57 
58 #ifndef TRUE
59 #define TRUE 1
60 #endif
61 
62 #ifndef FALSE
63 #define FALSE 0
64 #endif
65 
66 /*VXGE_HW_STATUS_H*/
67 
68 #define VXGE_HW_EVENT_BASE 0
69 #define VXGE_LL_EVENT_BASE 100
70 
94  /* HW events */
109 };
110 
111 #define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
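
VXGE_HW_SET_LEVEL keeps the larger of its two arguments, which lets a stored event or alarm severity only ratchet upward as new events are folded in. A tiny illustrative sketch (the variable and function names are assumptions):

/* Sketch: a recorded severity is only ever raised, never lowered. */
static u32 example_track_severity(u32 recorded_level, u32 new_event_level)
{
	return VXGE_HW_SET_LEVEL(new_event_level, recorded_level);
}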
112 
113 /*
114  * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
115  * caller.
116  */
119  struct pci_dev *handle;
121 };
122 
123 /*
124  * vxge_hw_mempool_item_f - Mempool item alloc/free callback
125  * @mempoolh: Memory pool handle.
126  * @memblock: Address of memory block
127  * @memblock_index: Index of memory block
128  * @item: Item that gets allocated or freed.
129  * @index: Item's index in the memory pool.
130  * @is_last: True if this item is the last one in the pool; false otherwise.
131  * @userdata: Per-pool user context.
132  *
133  * Memory pool allocation/deallocation callback.
134  */
135 
136 /*
137  * struct vxge_hw_mempool - Memory pool.
138  */
140 
142  struct vxge_hw_mempool *mempoolh,
143  u32 memblock_index,
145  u32 index,
146  u32 is_last);
147 
148  void *userdata;
161  void **items_arr;
163 };
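
A hedged sketch of an item-allocation callback shaped like the member declared in the (partially shown) struct above; the dma_object parameter, the callback name, and the body are assumptions made for illustration only.

/* Sketch of a mempool item_func_alloc callback; everything below the
 * parameter list is illustrative. */
static void example_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
				       u32 memblock_index,
				       struct vxge_hw_mempool_dma *dma_object,
				       u32 index,
				       u32 is_last)
{
	/* Per-pool context registered when the pool was created. */
	void *ctx = mempoolh->userdata;

	/* A real callback would carve item 'index' out of memory block
	 * 'memblock_index', record its bus address via 'dma_object', and
	 * treat 'is_last' as the end-of-pool marker. */
	(void)ctx;
	(void)memblock_index;
	(void)dma_object;
	(void)index;
	(void)is_last;
}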
164 
165 #define VXGE_HW_MAX_INTR_PER_VP 4
166 #define VXGE_HW_VPATH_INTR_TX 0
167 #define VXGE_HW_VPATH_INTR_RX 1
168 #define VXGE_HW_VPATH_INTR_EINTA 2
169 #define VXGE_HW_VPATH_INTR_BMAP 3
170 
171 #define VXGE_HW_BLOCK_SIZE 4096
172 
234 
236 #define VXGE_HW_TIM_INTR_ENABLE 1
237 #define VXGE_HW_TIM_INTR_DISABLE 0
238 #define VXGE_HW_TIM_INTR_DEFAULT 0
239 
241 #define VXGE_HW_MIN_TIM_BTIMER_VAL 0
242 #define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
243 #define VXGE_HW_USE_FLASH_DEFAULT (~0)
244 
246 #define VXGE_HW_TIM_TIMER_AC_ENABLE 1
247 #define VXGE_HW_TIM_TIMER_AC_DISABLE 0
248 
250 #define VXGE_HW_TIM_TIMER_CI_ENABLE 1
251 #define VXGE_HW_TIM_TIMER_CI_DISABLE 0
252 
254 #define VXGE_HW_TIM_TIMER_RI_ENABLE 1
255 #define VXGE_HW_TIM_TIMER_RI_DISABLE 0
256 
258 #define VXGE_HW_MIN_TIM_RTIMER_VAL 0
259 #define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
260 
262 #define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
263 #define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
264 #define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
265 #define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
266 
268 #define VXGE_HW_MIN_TIM_LTIMER_VAL 0
269 #define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
270 
271  /* Line utilization interrupts */
273 #define VXGE_HW_MIN_TIM_URANGE_A 0
274 #define VXGE_HW_MAX_TIM_URANGE_A 100
275 
277 #define VXGE_HW_MIN_TIM_UEC_A 0
278 #define VXGE_HW_MAX_TIM_UEC_A 65535
279 
281 #define VXGE_HW_MIN_TIM_URANGE_B 0
282 #define VXGE_HW_MAX_TIM_URANGE_B 100
283 
285 #define VXGE_HW_MIN_TIM_UEC_B 0
286 #define VXGE_HW_MAX_TIM_UEC_B 65535
287 
289 #define VXGE_HW_MIN_TIM_URANGE_C 0
290 #define VXGE_HW_MAX_TIM_URANGE_C 100
291 
293 #define VXGE_HW_MIN_TIM_UEC_C 0
294 #define VXGE_HW_MAX_TIM_UEC_C 65535
295 
297 #define VXGE_HW_MIN_TIM_UEC_D 0
298 #define VXGE_HW_MAX_TIM_UEC_D 65535
299 };
300 
301 #define VXGE_HW_STATS_OP_READ 0
302 #define VXGE_HW_STATS_OP_CLEAR_STAT 1
303 #define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
304 #define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
305 #define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
306 
307 #define VXGE_HW_STATS_LOC_AGGR 17
308 #define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
309 
310 #define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
311 #define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
312 
313 #define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
314 #define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
315  vxge_bVALn(bits, 0, 32)
316 
317 #define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
318  vxge_bVALn(bits, 32, 32)
319 
320 #define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
321 #define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
322  vxge_bVALn(bits, 0, 32)
323 
324 #define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
325  vxge_bVALn(bits, 32, 32)
326 
385 /*0x000*/ u64 tx_frms;
386 /*0x008*/ u64 tx_data_octets;
387 /*0x010*/ u64 tx_mcast_frms;
388 /*0x018*/ u64 tx_bcast_frms;
391 /*0x030*/ u64 rx_frms;
392 /*0x038*/ u64 rx_data_octets;
393 /*0x040*/ u64 rx_mcast_frms;
394 /*0x048*/ u64 rx_bcast_frms;
398 } __packed;
399 
740 /*0x000*/ u64 tx_ttl_frms;
741 /*0x008*/ u64 tx_ttl_octets;
742 /*0x010*/ u64 tx_data_octets;
743 /*0x018*/ u64 tx_mcast_frms;
744 /*0x020*/ u64 tx_bcast_frms;
745 /*0x028*/ u64 tx_ucast_frms;
746 /*0x030*/ u64 tx_tagged_frms;
747 /*0x038*/ u64 tx_vld_ip;
749 /*0x048*/ u64 tx_icmp;
750 /*0x050*/ u64 tx_tcp;
751 /*0x058*/ u64 tx_rst_tcp;
752 /*0x060*/ u64 tx_udp;
753 /*0x068*/ u32 tx_parse_error;
757 /*0x07c*/ u32 tx_lacpdu_frms;
758 /*0x080*/ u32 tx_drop_ip;
764 /*0x098*/ u32 unused1;
766 /*0x09e*/ u16 tx_drop_frms;
767 /*0x0a0*/ u64 rx_ttl_frms;
768 /*0x0a8*/ u64 rx_vld_frms;
770 /*0x0b8*/ u64 rx_ttl_octets;
771 /*0x0c0*/ u64 rx_data_octets;
777 /*0x0f0*/ u64 rx_tagged_frms;
778 /*0x0f8*/ u64 rx_long_frms;
779 /*0x100*/ u64 rx_usized_frms;
780 /*0x108*/ u64 rx_osized_frms;
781 /*0x110*/ u64 rx_frag_frms;
782 /*0x118*/ u64 rx_jabber_frms;
783 /*0x120*/ u64 rx_ttl_64_frms;
793 /*0x170*/ u64 rx_ip;
794 /*0x178*/ u64 rx_accepted_ip;
795 /*0x180*/ u64 rx_ip_octets;
796 /*0x188*/ u64 rx_err_ip;
797 /*0x190*/ u64 rx_icmp;
798 /*0x198*/ u64 rx_tcp;
799 /*0x1a0*/ u64 rx_udp;
800 /*0x1a8*/ u64 rx_err_tcp;
801 /*0x1b0*/ u64 rx_pause_count;
807 /*0x1e0*/ u64 rx_drop_frms;
809 /*0x1f0*/ u64 rx_drop_ip;
810 /*0x1f8*/ u64 rx_drop_udp;
812 /*0x204*/ u32 rx_lacpdu_frms;
815 /*0x210*/ u32 rx_fcs_discard;
818 /*0x21c*/ u32 rx_len_discard;
819 /*0x220*/ u32 rx_rpa_discard;
821 /*0x228*/ u32 rx_rts_discard;
824 /*0x234*/ u32 rx_red_discard;
831 /*0x250*/ u32 rx_local_fault;
833 /*0x258*/ u32 rx_jettison;
835 } __packed;
836 
911 } __packed;
912 
1083 } __packed;
1084 
1104 };
1105 
1162 /*0x004*/ u32 unused1;
1164 /*0x00c*/ u32 unused2;
1166 /*0x014*/ u32 unused3;
1170 /*0x02c*/ u32 unused4;
1172 /*0x034*/ u32 unused5;
1178 /*0x04c*/ u32 unused6;
1180 /*0x054*/ u32 unused7;
1183 /*0x220*/ u64 unused9;
1189 /*0x23a*/ u8 unused10[6];
1191 /*0x244*/ u32 unused11;
1192 /*0x248*/ u16 rxd_returned;
1193 /*0x24a*/ u8 unused12[6];
1199 /*0x25e*/ u64 rx_wol_frms;
1201 } __packed;
1202 
1203 
1467 /*0x0008*/ struct {
1469  /*0x0004*/ u32 unused1;
1471 /*0x0090*/ struct {
1473  /*0x0004*/ u32 unused2;
1475 /*0x0118*/ struct {
1477  /*0x0004*/ u32 unused3;
1486 /*0x01bc*/ u32 unused4;
1488 /*0x01c4*/ u32 unused5;
1494 /*0x01dc*/ u32 unused6;
1495 /*0x01e0*/ struct {
1496  /*0x0000*/ u16 unused7;
1497  /*0x0002*/ u16 pci_depl_cplh;
1498  /*0x0004*/ u16 pci_depl_nph;
1499  /*0x0006*/ u16 pci_depl_ph;
1500  } pci_depl_h_vplane[17];
1501 /*0x0268*/ struct {
1502  /*0x0000*/ u16 unused8;
1503  /*0x0002*/ u16 pci_depl_cpld;
1504  /*0x0004*/ u16 pci_depl_npd;
1505  /*0x0006*/ u16 pci_depl_pd;
1506  } pci_depl_d_vplane[17];
1511 /*0x0af0*/ u64 unused7;
1512 /*0x0af8*/ u64 unused8;
1513 /*0x0b00*/ u64 unused9;
1514 /*0x0b08*/ u64 unused10;
1515 /*0x0b10*/ u32 unused11;
1517 /*0x0b18*/ u32 unused12;
1518 /*0x0b1c*/ u8 unused13;
1522 /*0x0b20*/ u32 unused14;
1523 /*0x0b24*/ u8 unused15;
1527 } __packed;
1528 
1541 };
1542 
1562 };
1563 
1584 };
1585 
1603 
1604 };
1605 
1642 };
1643 
1659 };
1660 
1682 };
1683 
1692 };
1693 
1716  /* handles */
1718 
1719  /* HW device hardware statistics */
1721 
1722  /* HW device "soft" stats */
1725 
1726 };
1727 
1729  struct __vxge_hw_device *devh);
1730 
1732  struct __vxge_hw_device *devh,
1733  struct vxge_hw_device_stats_hw_info *hw_stats);
1734 
1736  struct __vxge_hw_device *devh,
1737  struct vxge_hw_device_stats_sw_info *sw_stats);
1738 
1740 
1742 
1743 enum vxge_hw_status
1745  struct __vxge_hw_device *devh,
1746  u32 operation,
1747  u32 location,
1748  u32 offset,
1749  u64 *stat);
1750 
1751 enum vxge_hw_status
1753  struct vxge_hw_xmac_stats *xmac_stats);
1754 
1776 };
1777 
1778 enum vxge_hw_status
1781  u32 index,
1782  u32 offset,
1783  u64 *value);
1784 
1785 enum vxge_hw_status
1788  u32 index,
1789  u32 offset,
1790  u64 value);
1791 
1809 };
1810 
1860 };
1905 };
1906 
1908  struct __vxge_hw_ring *ring_handle,
1909  void **rxdh);
1910 
1911 void
1913  struct __vxge_hw_ring *ring_handle,
1914  void *rxdh);
1915 
1916 void
1918  struct __vxge_hw_ring *ring_handle,
1919  void *rxdh);
1920 
1921 enum vxge_hw_status
1922 vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
1923 
1924 void
1926  struct __vxge_hw_ring *ring_handle,
1927  void *rxdh);
1928 
1930  struct __vxge_hw_ring *ring_handle,
1931  void *rxdh);
1932 
1934  struct __vxge_hw_ring *ring_handle,
1935  void **rxdh,
1936  u8 *t_code);
1937 
1939  struct __vxge_hw_ring *ring_handle,
1940  void *rxdh,
1941  u8 t_code);
1942 
1944  struct __vxge_hw_ring *ring_handle,
1945  void *rxdh);
1946 
1967  VXGE_HW_FRAME_PROTO_UDP)
1968 };
1969 
1984 };
1985 
2013 };
2014 
2016  struct __vxge_hw_fifo *fifoh,
2017  void **txdlh,
2018  void **txdl_priv);
2019 
2021  struct __vxge_hw_fifo *fifo_handle,
2022  void *txdlh,
2023  u32 frag_idx,
2024  dma_addr_t dma_pointer,
2025  u32 size);
2026 
2028  struct __vxge_hw_fifo *fifo_handle,
2029  void *txdlh);
2030 
2032  struct __vxge_hw_fifo *fifo_handle);
2033 
2035  struct __vxge_hw_fifo *fifoh,
2036  void **txdlh,
2037  enum vxge_hw_fifo_tcode *t_code);
2038 
2040  struct __vxge_hw_fifo *fifoh,
2041  void *txdlh,
2042  enum vxge_hw_fifo_tcode t_code);
2043 
2045  struct __vxge_hw_fifo *fifoh,
2046  void *txdlh);
2047 
2048 /*
2049  * Device
2050  */
2051 
2052 #define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2053 #define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
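
Each 4 KB RxD block reserves its last 16 bytes for ring bookkeeping: the owning memblock index at VXGE_HW_RING_MEMBLOCK_IDX_OFFSET and the DMA address of the next block at VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET. A hedged sketch of helpers that read and write those trailer fields; the helper names are assumptions, only the offsets come from this header.

/* Sketch: read the memblock index stored in a block's 16-byte trailer. */
static inline u32 example_ring_block_memblock_idx(u8 *block)
{
	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/* Sketch: link this block to the next one by its DMA address. */
static inline void example_ring_block_link_next(u8 *block, u64 next_block_dma)
{
	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = next_block_dma;
}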
2054 
2055 /*
2056  * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
2057  * @dma_addr: DMA (mapped) address of _this_ descriptor.
2058  * @dma_handle: DMA handle used to map the descriptor onto device.
2059  * @dma_offset: Descriptor's offset in the memory block. HW allocates
2060  * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
2061  * bytes. Each memblock is contiguous DMA-able memory. Each
2062  * memblock contains 1 or more 4KB RxD blocks visible to the
2063  * Titan hardware.
2064  * @dma_object: DMA address and handle of the memory block that contains
2065  * the descriptor. This member is used only in the "checked"
2066  * version of the HW (to enforce certain assertions);
2067  * otherwise it gets compiled out.
2068  * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
2069  *
2070  * Per-receive descriptor HW-private data. HW uses the space to keep DMA
2071  * information associated with the descriptor. Note that the driver can ask HW
2072  * to allocate additional per-descriptor space for its own (driver-specific)
2073  * purposes.
2074  */
2079 #ifdef VXGE_DEBUG_ASSERT
2081 #endif
2082 };
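
The comment above describes descriptors as living inside larger DMA-contiguous memblocks, with @dma_offset locating each one inside its block. A minimal sketch of that relationship, assuming the memblock's bus address is known (the helper name is illustrative):

/* Sketch: a descriptor's bus address is its memblock's DMA base plus the
 * descriptor's offset within that block. */
static inline dma_addr_t example_rxd_bus_addr(dma_addr_t memblock_dma_base,
					      ptrdiff_t dma_offset)
{
	return memblock_dma_base + dma_offset;
}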
2083 
2086  struct vxge_hw_mempool *mempoolh,
2087  u32 memblock_index,
2089  u32 index,
2090  u32 is_last);
2091 };
2092 
2093 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
2094  ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
2095 
2096 enum vxge_hw_status
2098  struct __vxge_hw_vpath_handle *vpath_handle,
2099  u32 action,
2100  u32 rts_table,
2101  u32 offset,
2102  u64 *data1,
2103  u64 *data2);
2104 
2105 enum vxge_hw_status
2107  struct __vxge_hw_vpath_handle *vpath_handle,
2108  u32 action,
2109  u32 rts_table,
2110  u32 offset,
2111  u64 data1,
2112  u64 data2);
2113 
2114 enum vxge_hw_status
2116  struct __vxge_hw_device *devh,
2117  u32 vp_id);
2118 
2120  struct __vxge_hw_device *devh);
2121 
2122 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
2123 
2125  struct __vxge_hw_device *devh);
2126 
2128  struct __vxge_hw_device *devh);
2129 
2131  struct __vxge_hw_device *devh);
2132 
2134  struct __vxge_hw_device *devh,
2135  u32 skip_alarms,
2136  u64 *reason);
2137 
2139  struct __vxge_hw_device *devh);
2140 
2141 /*
2142  * Virtual Paths
2143  */
2144 
2146 
2148 
2150  struct __vxge_hw_vpath_handle *vpath_handle);
2151 
2156 };
2157 
2158 enum vxge_hw_status
2160  struct __vxge_hw_vpath_handle *vpath_handle,
2161  u8 *macaddr,
2162  u8 *macaddr_mask,
2163  enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
2164 
2165 enum vxge_hw_status
2167  struct __vxge_hw_vpath_handle *vpath_handle,
2168  u8 *macaddr,
2169  u8 *macaddr_mask);
2170 
2171 enum vxge_hw_status
2173  struct __vxge_hw_vpath_handle *vpath_handle,
2174  u8 *macaddr,
2175  u8 *macaddr_mask);
2176 
2177 enum vxge_hw_status
2179  struct __vxge_hw_vpath_handle *vpath_handle,
2180  u8 *macaddr,
2181  u8 *macaddr_mask);
2182 
2183 enum vxge_hw_status
2185  struct __vxge_hw_vpath_handle *vpath_handle,
2186  u64 vid);
2187 
2188 enum vxge_hw_status
2190  struct __vxge_hw_vpath_handle *vpath_handle,
2191  u64 *vid);
2192 
2193 enum vxge_hw_status
2195  struct __vxge_hw_vpath_handle *vpath_handle,
2196  u64 vid);
2197 
2198 enum vxge_hw_status
2200  struct __vxge_hw_vpath_handle *vpath_handle,
2201  u64 etype);
2202 
2203 enum vxge_hw_status
2205  struct __vxge_hw_vpath_handle *vpath_handle,
2206  u64 *etype);
2207 
2208 enum vxge_hw_status
2210  struct __vxge_hw_vpath_handle *vpath_handle,
2211  u64 *etype);
2212 
2213 enum vxge_hw_status
2215  struct __vxge_hw_vpath_handle *vpath_handle,
2216  u64 etype);
2217 
2219  struct __vxge_hw_vpath_handle *vpath_handle);
2220 
2222  struct __vxge_hw_vpath_handle *vpath_handle);
2223 
2225  struct __vxge_hw_vpath_handle *vpath_handle);
2226 
2228  struct __vxge_hw_vpath_handle *vpath_handle);
2229 
2231  struct __vxge_hw_vpath_handle *vpath_handle);
2232 
2234  struct __vxge_hw_ring *ringh);
2235 
2237  struct __vxge_hw_fifo *fifoh,
2238  struct sk_buff ***skb_ptr, int nr_skb, int *more);
2239 
2241  struct __vxge_hw_vpath_handle *vpath_handle,
2242  u32 skip_alarms);
2243 
2244 void
2245 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2246  int *tim_msix_id, int alarm_msix_id);
2247 
2248 void
2249 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2250  int msix_id);
2251 
2252 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253 
2254 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2255 
2256 void
2258  int msix_id);
2259 
2261  struct __vxge_hw_vpath_handle *vpath_handle);
2262 
2264  struct __vxge_hw_vpath_handle *vpath_handle);
2265 
2267  struct __vxge_hw_vpath_handle *vpath_handle);
2268 
2270  struct __vxge_hw_vpath_handle *vpath_handle);
2271 
2272 void
2273 vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
2274 
2275 void
2276 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2277 
2278 void
2279 vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280 
2281 void
2283  void **dtrh);
2284 
2285 void
2287 
2288 void
2290 
2291 int
2293 
2295 
2297 
2298 #endif