Linux Kernel 3.7.1
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"
Data Structures

struct tx_sw_desc
struct rx_sw_desc

Macros

#define POISON_BUF_VAL -1
Enumerations

anonymous enum {
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MAX_TX_RECLAIM = 16,
	MAX_RX_REFILL = 16,
	RX_QCHECK_PERIOD = (HZ / 2),
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,
	FL_STARVE_THRES = 4,
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN,
	ETHTXQ_MAX_HDR,
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
	MAX_CTRL_WR_LEN = 256,
	MAX_IMM_TX_LEN,
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,
	RX_SKB_LEN = 512
}

anonymous enum {
	RX_LARGE_BUF = 1 << 0,
	RX_UNMAPPED_BUF = 1 << 1
}
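Several of the values above are derived rather than fixed. The following standalone worked example of that arithmetic is a sketch only: it assumes SGE_EQ_IDXSIZE is 64 bytes (one hardware egress-queue unit) and uses 45 flits as a purely illustrative worst-case work-request size.

#include <stdio.h>

/*
 * Worked example of the derived SGE constants.  SGE_EQ_IDXSIZE is assumed
 * to be 64 (an assumption about t4vf_defs.h); a __be64 flit is 8 bytes.
 */
#define SGE_EQ_IDXSIZE     64                 /* assumed value */
#define EQ_UNIT            SGE_EQ_IDXSIZE
#define FL_PER_EQ_UNIT     (EQ_UNIT / 8)      /* Free List pointers per unit */
#define TXD_PER_EQ_UNIT    (EQ_UNIT / 8)      /* TX flits per unit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative worst-case TX work request size in flits. */
	int ethtxq_max_flits = 45;
	int stop_thres = 1 + DIV_ROUND_UP(ethtxq_max_flits, TXD_PER_EQ_UNIT);

	printf("FL_PER_EQ_UNIT    = %d\n", FL_PER_EQ_UNIT);   /* 8 */
	printf("TXD_PER_EQ_UNIT   = %d\n", TXD_PER_EQ_UNIT);  /* 8 */
	printf("ETHTXQ_STOP_THRES = %d\n", stop_thres);       /* 1 + 6 = 7 */
	return 0;
}

The point of ETHTXQ_STOP_THRES is that the TX queue is stopped while fewer than this many equation-queue units remain, so a maximum-size work request can always still be written.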
process_responses - process responses from an SGE response queue
	@rspq: the ingress response queue to process
	@budget: how many responses can be processed in this round
Process responses from a Scatter Gather Engine response queue up to the supplied budget. Responses include received packets as well as control messages from firmware or hardware.
Additionally, choose the interrupt holdoff time for the next interrupt on this queue. If the system is under memory shortage, use a fairly long delay to help recovery.
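A minimal sketch of the budget-driven pattern described above. This is not the driver's implementation: every type and helper here (struct demo_rspq, demo_fetch_response(), demo_handle_response(), demo_low_on_memory()) is a hypothetical stand-in, and the holdoff index values are illustrative.

struct demo_rspq {
	unsigned int cidx;              /* consumer index */
	unsigned int next_intr_params;  /* holdoff selection for next IRQ */
};

extern void *demo_fetch_response(struct demo_rspq *rspq);
extern void demo_handle_response(struct demo_rspq *rspq, void *rsp);
extern int demo_low_on_memory(void);

static int demo_process_responses(struct demo_rspq *rspq, int budget)
{
	int budget_left = budget;
	void *rsp;

	/* Consume at most 'budget' responses this round. */
	while (budget_left > 0 && (rsp = demo_fetch_response(rspq)) != NULL) {
		demo_handle_response(rspq, rsp); /* packet or FW/HW message */
		rspq->cidx++;
		budget_left--;
	}

	/*
	 * Choose the holdoff timer for the next interrupt: a long delay when
	 * memory is tight (to help recovery), otherwise the queue's default.
	 */
	rspq->next_intr_params = demo_low_on_memory() ? 5 : 0;

	return budget - budget_left;    /* number of responses processed */
}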
int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
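t4vf_eth_xmit() is the driver's transmit routine and is handed to the network stack through the ndo_start_xmit hook of net_device_ops. A hedged sketch follows; the extern prototype simply mirrors the signature shown above and would normally come from the driver's own headers, and demo_setup_netdev() is hypothetical.

#include <linux/netdevice.h>

/* Prototype as documented above; normally provided by the driver headers. */
extern int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev);

/*
 * Sketch: hooking the transmit routine into the stack.  A real driver fills
 * in many more ndo_* callbacks; only the TX hook is shown here.
 */
static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit = t4vf_eth_xmit,
};

static void demo_setup_netdev(struct net_device *dev)
{
	dev->netdev_ops = &demo_netdev_ops;
}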
irq_handler_t t4vf_intr_handler(struct adapter *adapter)
irqreturn_t t4vf_intr_msi(int irq, void *cookie)
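For non-MSI-X operation, t4vf_intr_handler() returns the handler that matches the adapter's interrupt mode (for example t4vf_intr_msi() when MSI is enabled), and that handler is registered with request_irq(). A hedged sketch; the IRQF_SHARED flag and the use of pdev->irq follow common kernel practice rather than the driver's exact code.

#include <linux/interrupt.h>
#include <linux/pci.h>

struct adapter;			/* opaque here; defined by the driver */
extern irq_handler_t t4vf_intr_handler(struct adapter *adapter);

/*
 * Sketch: registering the non-MSI-X (MSI or legacy INTx) interrupt handler.
 * t4vf_intr_handler() selects the right top-level handler for the adapter.
 */
static int demo_request_nonmsix_irq(struct adapter *adapter,
				    struct pci_dev *pdev, const char *name)
{
	return request_irq(pdev->irq, t4vf_intr_handler(adapter),
			   IRQF_SHARED, name, adapter);
}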
int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
			   struct net_device *dev, struct netdev_queue *devq,
			   unsigned int iqid)
int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
		       bool iqasynch, struct net_device *dev, int intr_dest,
		       struct sge_fl *fl, rspq_handler_t hnd)
t4vf_sge_alloc_rxq - allocate an SGE RX Queue
	@adapter: the adapter
	@rspq: pointer to the new rxq's Response Queue to be filled in
	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
	@dev: the network device associated with the new rspq
	@intr_dest: MSI-X vector index (overridden in MSI mode)
	@fl: pointer to the new rxq's Free List to be filled in
	@hnd: the interrupt handler to invoke for the rspq
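A hedged bring-up sketch showing how the two allocators are typically combined: an asynchronous firmware event queue first (iqasynch set, no Free List), then an Ethernet response queue with its Free List, then the matching TX queue. The struct sge layout (fw_evtq, ethrxq[], ethtxq[], cntxt_id), the demo_ethrx_handler symbol, and the choice of completion queue for the TX queue are assumptions for illustration, not verbatim driver code; the driver's private headers are assumed to be included.

extern rspq_handler_t demo_ethrx_handler;	/* hypothetical RX handler */

static int demo_setup_sge_queues(struct adapter *adapter,
				 struct net_device *dev)
{
	struct sge *s = &adapter->sge;	/* assumed embedded SGE state */
	int msix = 0;
	int err;

	/* Asynchronous firmware event queue: iqasynch set, no Free List,
	 * default response handler.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, dev,
				 msix++, NULL, NULL);
	if (err)
		return err;

	/* One Ethernet ingress queue with its Free List and RX handler. */
	err = t4vf_sge_alloc_rxq(adapter, &s->ethrxq[0].rspq, false, dev,
				 msix++, &s->ethrxq[0].fl,
				 demo_ethrx_handler);
	if (err)
		return err;

	/* Matching Ethernet TX queue; iqid names the ingress queue that will
	 * receive its TX completion messages (assumed to be the firmware
	 * event queue here).
	 */
	return t4vf_sge_alloc_eth_txq(adapter, &s->ethtxq[0], dev,
				      netdev_get_tx_queue(dev, 0),
				      s->fw_evtq.cntxt_id);
}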
t4vf_sge_init - initialize SGE
	@adapter: the adapter
Performs SGE initialization needed every time after a chip reset. We do not initialize any of the queue sets here; instead, the driver top level must request those individually. We also do not enable DMA here; that should be done after the queues have been set up.
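Put differently, the expected ordering is: t4vf_sge_init() once after the chip reset, individual queue allocation when an interface is brought up, and DMA enablement only after that. A compressed sketch, under the assumption that t4vf_sge_init() returns 0 or a negative errno; demo_setup_sge_queues() refers to the earlier sketch and demo_enable_dma() is a placeholder.

static int demo_sge_bring_up(struct adapter *adapter, struct net_device *dev)
{
	int err;

	err = t4vf_sge_init(adapter);		/* after chip reset */
	if (err)
		return err;

	err = demo_setup_sge_queues(adapter, dev); /* queue sets, one by one */
	if (err)
		return err;

	demo_enable_dma(adapter);	/* hypothetical: enable DMA last */
	return 0;
}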
irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
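Under MSI-X, each SGE ingress queue gets its own vector, and t4vf_sge_intr_msix() is registered once per queue with that queue's response queue as the cookie. A hedged sketch; the vector number and IRQ name are assumed to come from the adapter's MSI-X bookkeeping, and struct sge_rspq plus the handler prototype from the driver's private headers.

#include <linux/interrupt.h>

/*
 * Sketch: per-queue MSI-X registration.  The response queue itself is the
 * cookie that t4vf_sge_intr_msix() later receives as its second argument.
 */
static int demo_request_msix_queue_irq(unsigned int vec, const char *name,
				       struct sge_rspq *rspq)
{
	return request_irq(vec, t4vf_sge_intr_msix, 0, name, rspq);
}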