|
void * | __kmalloc_reserve (size_t size, gfp_t flags, int node, unsigned long ip, bool *pfmemalloc) |
|
struct sk_buff * | __alloc_skb (unsigned int size, gfp_t gfp_mask, int flags, int node) |
|
| EXPORT_SYMBOL (__alloc_skb) |
|
struct sk_buff * | build_skb (void *data, unsigned int frag_size) |
|
| EXPORT_SYMBOL (build_skb) |
|
void * | netdev_alloc_frag (unsigned int fragsz) |
|
| EXPORT_SYMBOL (netdev_alloc_frag) |
|
struct sk_buff * | __netdev_alloc_skb (struct net_device *dev, unsigned int length, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL (__netdev_alloc_skb) |
|
void | skb_add_rx_frag (struct sk_buff *skb, int i, struct page *page, int off, int size, unsigned int truesize) |
|
| EXPORT_SYMBOL (skb_add_rx_frag) |
|
void | __kfree_skb (struct sk_buff *skb) |
|
| EXPORT_SYMBOL (__kfree_skb) |
|
void | kfree_skb (struct sk_buff *skb) |
|
| EXPORT_SYMBOL (kfree_skb) |
|
void | consume_skb (struct sk_buff *skb) |
|
| EXPORT_SYMBOL (consume_skb) |
|
struct sk_buff * | skb_morph (struct sk_buff *dst, struct sk_buff *src) |
|
| EXPORT_SYMBOL_GPL (skb_morph) |
|
int | skb_copy_ubufs (struct sk_buff *skb, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL_GPL (skb_copy_ubufs) |
|
struct sk_buff * | skb_clone (struct sk_buff *skb, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL (skb_clone) |
|
struct sk_buff * | skb_copy (const struct sk_buff *skb, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL (skb_copy) |
|
struct sk_buff * | __pskb_copy (struct sk_buff *skb, int headroom, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL (__pskb_copy) |
|
int | pskb_expand_head (struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL (pskb_expand_head) |
|
struct sk_buff * | skb_realloc_headroom (struct sk_buff *skb, unsigned int headroom) |
|
| EXPORT_SYMBOL (skb_realloc_headroom) |
|
struct sk_buff * | skb_copy_expand (const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t gfp_mask) |
|
| EXPORT_SYMBOL (skb_copy_expand) |
|
int | skb_pad (struct sk_buff *skb, int pad) |
|
| EXPORT_SYMBOL (skb_pad) |
|
unsigned char * | skb_put (struct sk_buff *skb, unsigned int len) |
|
| EXPORT_SYMBOL (skb_put) |
|
unsigned char * | skb_push (struct sk_buff *skb, unsigned int len) |
|
| EXPORT_SYMBOL (skb_push) |
|
unsigned char * | skb_pull (struct sk_buff *skb, unsigned int len) |
|
| EXPORT_SYMBOL (skb_pull) |
|
void | skb_trim (struct sk_buff *skb, unsigned int len) |
|
| EXPORT_SYMBOL (skb_trim) |
|
int | ___pskb_trim (struct sk_buff *skb, unsigned int len) |
|
| EXPORT_SYMBOL (___pskb_trim) |
|
unsigned char * | __pskb_pull_tail (struct sk_buff *skb, int delta) |
|
| EXPORT_SYMBOL (__pskb_pull_tail) |
|
int | skb_copy_bits (const struct sk_buff *skb, int offset, void *to, int len) |
|
| EXPORT_SYMBOL (skb_copy_bits) |
|
int | skb_splice_bits (struct sk_buff *skb, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) |
|
int | skb_store_bits (struct sk_buff *skb, int offset, const void *from, int len) |
|
| EXPORT_SYMBOL (skb_store_bits) |
|
__wsum | skb_checksum (const struct sk_buff *skb, int offset, int len, __wsum csum) |
|
| EXPORT_SYMBOL (skb_checksum) |
|
__wsum | skb_copy_and_csum_bits (const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum) |
|
| EXPORT_SYMBOL (skb_copy_and_csum_bits) |
|
void | skb_copy_and_csum_dev (const struct sk_buff *skb, u8 *to) |
|
| EXPORT_SYMBOL (skb_copy_and_csum_dev) |
|
struct sk_buff * | skb_dequeue (struct sk_buff_head *list) |
|
| EXPORT_SYMBOL (skb_dequeue) |
|
struct sk_buff * | skb_dequeue_tail (struct sk_buff_head *list) |
|
| EXPORT_SYMBOL (skb_dequeue_tail) |
|
void | skb_queue_purge (struct sk_buff_head *list) |
|
| EXPORT_SYMBOL (skb_queue_purge) |
|
void | skb_queue_head (struct sk_buff_head *list, struct sk_buff *newsk) |
|
| EXPORT_SYMBOL (skb_queue_head) |
|
void | skb_queue_tail (struct sk_buff_head *list, struct sk_buff *newsk) |
|
| EXPORT_SYMBOL (skb_queue_tail) |
|
void | skb_unlink (struct sk_buff *skb, struct sk_buff_head *list) |
|
| EXPORT_SYMBOL (skb_unlink) |
|
void | skb_append (struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
|
| EXPORT_SYMBOL (skb_append) |
|
void | skb_insert (struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
|
| EXPORT_SYMBOL (skb_insert) |
|
void | skb_split (struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
|
| EXPORT_SYMBOL (skb_split) |
|
int | skb_shift (struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
|
void | skb_prepare_seq_read (struct sk_buff *skb, unsigned int from, unsigned int to, struct skb_seq_state *st) |
|
| EXPORT_SYMBOL (skb_prepare_seq_read) |
|
unsigned int | skb_seq_read (unsigned int consumed, const u8 **data, struct skb_seq_state *st) |
|
| EXPORT_SYMBOL (skb_seq_read) |
|
void | skb_abort_seq_read (struct skb_seq_state *st) |
|
| EXPORT_SYMBOL (skb_abort_seq_read) |
|
unsigned int | skb_find_text (struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config, struct ts_state *state) |
|
| EXPORT_SYMBOL (skb_find_text) |
|
int | skb_append_datato_frags (struct sock *sk, struct sk_buff *skb, int(*getfrag)(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length) |
|
| EXPORT_SYMBOL (skb_append_datato_frags) |
|
unsigned char * | skb_pull_rcsum (struct sk_buff *skb, unsigned int len) |
|
| EXPORT_SYMBOL_GPL (skb_pull_rcsum) |
|
struct sk_buff * | skb_segment (struct sk_buff *skb, netdev_features_t features) |
|
| EXPORT_SYMBOL_GPL (skb_segment) |
|
int | skb_gro_receive (struct sk_buff **head, struct sk_buff *skb) |
|
| EXPORT_SYMBOL_GPL (skb_gro_receive) |
|
void __init | skb_init (void) |
|
int | skb_to_sgvec (struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
|
| EXPORT_SYMBOL_GPL (skb_to_sgvec) |
|
int | skb_cow_data (struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
|
| EXPORT_SYMBOL_GPL (skb_cow_data) |
|
int | sock_queue_err_skb (struct sock *sk, struct sk_buff *skb) |
|
| EXPORT_SYMBOL (sock_queue_err_skb) |
|
void | skb_tstamp_tx (struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps) |
|
| EXPORT_SYMBOL_GPL (skb_tstamp_tx) |
|
void | skb_complete_wifi_ack (struct sk_buff *skb, bool acked) |
|
| EXPORT_SYMBOL_GPL (skb_complete_wifi_ack) |
|
bool | skb_partial_csum_set (struct sk_buff *skb, u16 start, u16 off) |
|
| EXPORT_SYMBOL_GPL (skb_partial_csum_set) |
|
void | __skb_warn_lro_forwarding (const struct sk_buff *skb) |
|
| EXPORT_SYMBOL (__skb_warn_lro_forwarding) |
|
void | kfree_skb_partial (struct sk_buff *skb, bool head_stolen) |
|
| EXPORT_SYMBOL (kfree_skb_partial) |
|
bool | skb_try_coalesce (struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize) |
|
| EXPORT_SYMBOL (skb_try_coalesce) |
|
__alloc_skb - allocate a network buffer. @size: size to allocate. @gfp_mask: allocation mask. @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache instead of head cache and allocate a cloned (child) skb. If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for allocations in case the data is required for writeback. @node: numa node to allocate memory on.
Allocate a new &sk_buff. The returned buffer has no headroom and a tail room of at least size bytes. The object has a reference count of one. The return is the buffer. On a failure the return is NULL.
Buffers may only be allocated from interrupts using a @gfp_mask of GFP_ATOMIC.
Definition at line 208 of file skbuff.c.
__netdev_alloc_skb - allocate an skbuff for rx on a specific device. @dev: network device to receive on. @length: length to allocate. @gfp_mask: get_free_pages mask, passed to alloc_skb.
Allocate a new &sk_buff and assign it a usage count of one. The buffer has unspecified headroom built in. Users should allocate the headroom they think they need without accounting for the built in space. The built in space is used for optimisations.
NULL is returned if there is no free memory.
Definition at line 426 of file skbuff.c.
__pskb_pull_tail - advance tail of skb header. @skb: buffer to reallocate. @delta: number of bytes to advance tail.
The function makes a sense only on a fragmented &sk_buff, it expands header moving its tail forward and copying necessary data from fragmented part.
&sk_buff MUST have reference count of 1.
Returns NULL (and &sk_buff does not change) if pull failed or value of new tail of skb in the case of success.
All the pointers pointing into skb header may change and must be reloaded after call to this function.
Definition at line 1406 of file skbuff.c.
build_skb - build a network buffer. @data: data buffer provided by caller. @frag_size: size of fragment, or 0 if head was kmalloced.
Allocate a new &sk_buff. Caller provides space holding head and skb_shared_info. @data must have been allocated by kmalloc(). The return is the new skb buffer. On a failure the return is NULL, and @data is not freed. Notes: Before IO, driver allocates only data buffer where NIC put incoming frame. Driver should add room at head (NET_SKB_PAD) and MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)). After IO, driver calls build_skb() to allocate sk_buff and populate it before giving packet to stack. RX rings only contain data buffers, not full skbs.
Definition at line 308 of file skbuff.c.
pskb_expand_head - reallocate header of &sk_buff. @skb: buffer to reallocate. @nhead: room to add at head. @ntail: room to add at tail. @gfp_mask: allocation priority.
Expands (or creates identical copy, if @nhead and @ntail are zero) header of skb. &sk_buff itself is not changed. &sk_buff MUST have reference count of 1. Returns zero in the case of success, or a negative error code if expansion failed. In the last case, &sk_buff is not changed.
All the pointers pointing into skb header may change and must be reloaded after call to this function.
Definition at line 1004 of file skbuff.c.
skb_copy - create private copy of an sk_buff. @skb: buffer to copy. @gfp_mask: allocation priority.
Make a copy of both an &sk_buff and its data. This is used when the caller wishes to modify the data and needs a private copy of the data to alter. Returns NULL on failure or the pointer to the buffer on success. The returned buffer has a reference count of 1.
As by-product this function converts non-linear &sk_buff to linear one, so that &sk_buff becomes completely private and caller is allowed to modify all the data of returned buffer. This means that this function is not recommended for use in circumstances when only header is going to be modified. Use pskb_copy() instead.
Definition at line 905 of file skbuff.c.
skb_copy_bits - copy bits from skb to kernel buffer. @skb: source skb. @offset: offset in source. @to: destination buffer. @len: number of bytes to copy.
Copy the specified number of bytes from the source skb to the destination buffer.
CAUTION ! : If its prototype is ever changed, check arch/{*}/net/{*}.S files, since it is called from BPF assembly code.
Definition at line 1538 of file skbuff.c.
skb_copy_expand - copy and expand sk_buff. @skb: buffer to copy. @newheadroom: new free bytes at head. @newtailroom: new free bytes at tail. @gfp_mask: allocation priority.
Make a copy of both an &sk_buff and its data and while doing so allocate additional space.
This is used when the caller wishes to modify the data and needs a private copy of the data to alter as well as more space for new fields. Returns NULL on failure or the pointer to the buffer on success. The returned buffer has a reference count of 1.
You must pass GFP_ATOMIC as the allocation priority if this function is called from an interrupt.
Definition at line 1127 of file skbuff.c.
skb_copy_ubufs - copy userspace skb frags buffers to kernel. @skb: the skb to modify. @gfp_mask: allocation priority.
This must be called on SKBTX_DEV_ZEROCOPY skb. It will copy all frags into kernel and drop the reference to userspace pages.
If this function is called from an interrupt, @gfp_mask must be GFP_ATOMIC.
Returns 0 on success or a negative error code on failure to allocate kernel memory to copy to.
Definition at line 768 of file skbuff.c.
skb_cow_data - Check that a socket buffer's data buffers are writable. @skb: The socket buffer to check. @tailbits: Amount of trailing space to be added. @trailer: Returned pointer to the skb where the space begins.
Make sure that the data buffers attached to a socket buffer are writable. If they are not, private copies are made of the data buffers and the socket buffer is set to use these instead.
If @tailbits is given, make sure that there is space to write @tailbits bytes of data beyond current end of socket buffer. @trailer will be set to point to the skb in which this space begins.
The number of scatterlist elements required to completely map the COW'd and extended socket buffer will be returned.
Definition at line 3160 of file skbuff.c.
skb_partial_csum_set - set up and verify partial csum values for packet. @skb: the skb to set. @start: the number of bytes after skb->data to start checksumming. @off: the offset from start to place the checksum.
For untrusted partially-checksummed packets, we need to make sure the values for skb->csum_start and skb->csum_offset are valid so we don't oops.
This function checks and sets those values and skb->ip_summed: if this returns false you should drop the packet.
Definition at line 3358 of file skbuff.c.
unsigned int | skb_seq_read (unsigned int consumed, const u8 **data, struct skb_seq_state *st) |
skb_seq_read - Sequentially read skb data. @consumed: number of bytes consumed by the caller so far. @data: destination pointer for data to be returned. @st: state variable.
Reads a block of skb data at @consumed relative to the lower offset specified to skb_prepare_seq_read(). Assigns the head of the data block to @data and returns the length of the block or 0 if the end of the skb data or the upper offset has been reached.
The caller is not required to consume all of the data returned, i.e. @consumed is typically set to the number of bytes already consumed and the next call to skb_seq_read() will return the remaining part of the block.
Note 1: The size of each block of data returned can be arbitrary; this limitation is the cost for zerocopy sequential reads of potentially non linear data.
Note 2: Fragment lists within fragments are not implemented at the moment, state->root_skb could be replaced with a stack for this purpose.
Definition at line 2525 of file skbuff.c.
skb_shift - Shifts paged data partially from skb to another. @tgt: buffer into which tail data gets added. @skb: buffer from which the paged data comes from. @shiftlen: shift up to this many bytes.
Attempts to shift up to @shiftlen worth of bytes, which may be less than the length of the skb, from @skb to @tgt. Returns the number of bytes shifted. It's up to the caller to free @skb if everything was shifted.
If @tgt runs out of frags, the whole operation is aborted.
Skb cannot include anything else but paged data while tgt is allowed to have non-paged data as well.
TODO: full sized shift could be optimized but that would need specialized skb free'er to handle frags without up-to-date nr_frags.
Definition at line 2363 of file skbuff.c.