#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/rtnetlink.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

	bool ret_pfmemalloc = false;
		ret_pfmemalloc = true;
	*pfmemalloc = ret_pfmemalloc;
	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	skb_reset_tail_pointer(skb);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	shinfo = skb_shinfo(skb);

	if (flags & SKB_ALLOC_FCLONE) {

	unsigned int size = frag_size ? : ksize(data);

	skb_reset_tail_pointer(skb);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	shinfo = skb_shinfo(skb);
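/*
 * Illustrative sketch, not part of the original file: build_skb() wraps an
 * already-filled buffer (typically a page fragment a NIC DMAed into) in an
 * sk_buff head without copying the payload.  The helper below and the
 * assumption that pkt_len fits inside the fragment are hypothetical.
 */
static struct sk_buff *example_wrap_rx_buffer(void *data, unsigned int frag_size,
					      unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(data, frag_size);

	if (!skb)
		return NULL;
	skb_put(skb, pkt_len);	/* account for the bytes the device wrote */
	return skb;
}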
#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)

	if (nc->frag.offset + fragsz > nc->frag.size) {

	nc->frag.offset += fragsz;
		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);
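/*
 * Illustrative sketch, not part of the original file: driver RX paths normally
 * go through netdev_alloc_skb_ip_align(), which layers NET_IP_ALIGN bytes of
 * headroom on top of __netdev_alloc_skb() so the IP header ends up aligned.
 * The helper name is hypothetical.
 */
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int frame_len)
{
	/* NET_SKB_PAD and NET_IP_ALIGN headroom is reserved by the helper. */
	return netdev_alloc_skb_ip_align(dev, frame_len);
}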
	skb_fill_page_desc(skb, i, page, off, size);

static void skb_drop_list(struct sk_buff **listp)

static inline void skb_drop_fraglist(struct sk_buff *skb)
	skb_drop_list(&skb_shinfo(skb)->frag_list);

static void skb_clone_fraglist(struct sk_buff *skb)
	skb_walk_frags(skb, list)

	put_page(virt_to_head_page(skb->head));

static void skb_release_data(struct sk_buff *skb)
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);

			uarg = skb_shinfo(skb)->destructor_arg;

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

static void kfree_skbmem(struct sk_buff *skb)
		fclone_ref = (atomic_t *) (skb + 2);
		fclone_ref = (atomic_t *) (skb + 1);

static void skb_release_head_state(struct sk_buff *skb)
	secpath_put(skb->sp);
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT

static void skb_release_all(struct sk_buff *skb)
	skb_release_head_state(skb);
	skb_release_data(skb);

	skb_release_all(skb);

	trace_kfree_skb(skb, __builtin_return_address(0));

	trace_consume_skb(skb);
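/*
 * Illustrative sketch, not part of the original file: consume_skb() is the
 * "successful completion" twin of kfree_skb(); keeping the two apart means
 * only real drops show up under the kfree_skb tracepoint.  The helper name
 * is hypothetical.
 */
static void example_tx_complete(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* counted by trace_kfree_skb() */
}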
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
	new->tstamp = old->tstamp;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->no_fcs = old->no_fcs;
	new->sp = secpath_get(old->sp);
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	skb_copy_queue_mapping(new, old);
#if IS_ENABLED(CONFIG_IP_VS)
	new->mark = old->mark;
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
	skb_copy_secmark(new, old);

#define C(x) n->x = skb->x

	__copy_skb_header(n, skb);

	skb_release_all(dst);
	return __skb_clone(dst, src);

	int num_frags = skb_shinfo(skb)->nr_frags;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {

	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)head->private;

	if (skb_orphan_frags(skb, gfp_mask))
	if (skb_pfmemalloc(skb))
	return __skb_clone(n, skb);
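/*
 * Illustrative sketch, not part of the original file: skb_clone() shares the
 * data buffer (cheap, but the payload must then be treated as read-only),
 * while pskb_copy() duplicates the header portion so the caller may modify
 * it.  The helper name is hypothetical.
 */
static struct sk_buff *example_tap_packet(struct sk_buff *skb, bool need_writable)
{
	return need_writable ? pskb_copy(skb, GFP_ATOMIC)
			     : skb_clone(skb, GFP_ATOMIC);
}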
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	__copy_skb_header(new, old);
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	new->transport_header += offset;
	new->network_header   += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
	if (skb_pfmemalloc(skb))

	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	skb_reserve(n, headerlen);
	copy_skb_header(n, skb);

	unsigned int size = skb_headlen(skb) + headroom;
	skb_reserve(n, headroom);
	skb_copy_from_linear_data(skb, n->data, n->len);

	if (skb_shinfo(skb)->nr_frags) {
		if (skb_orphan_frags(skb, gfp_mask)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		skb_shinfo(n)->nr_frags = i;

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);

	copy_skb_header(n, skb);

	int size = nhead + skb_end_offset(skb) + ntail;
	if (skb_shared(skb))
	if (skb_pfmemalloc(skb))
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);

	off = (data + nhead) - skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	if (skb_mac_header_was_set(skb))
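/*
 * Illustrative sketch, not part of the original file: callers that need to
 * prepend a header usually go through skb_cow_head(), which only falls back
 * to pskb_expand_head() when the headroom is too small or the header area is
 * shared.  The helper name is hypothetical.
 */
static int example_push_outer_header(struct sk_buff *skb, unsigned int hdr_len)
{
	int err = skb_cow_head(skb, hdr_len);

	if (err)
		return err;
	skb_push(skb, hdr_len);		/* headroom is now guaranteed */
	return 0;
}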
	int delta = headroom - skb_headroom(skb);

				int newheadroom, int newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	skb_reserve(n, newheadroom);

	head_copy_len = oldheadroom;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	head_copy_off = newheadroom - head_copy_len;

			  skb->len + head_copy_len))

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	if (skb_mac_header_was_set(skb))

	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = skb_linearize(skb);

	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
		skb_over_panic(skb, len, __builtin_return_address(0));
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb_pull_inline(skb, len);
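/*
 * Illustrative sketch, not part of the original file: skb_put() only extends
 * the tail within the tailroom reserved at allocation time; writing past
 * skb->end is what trips skb_over_panic() above (and skb_push() past
 * skb->head trips skb_under_panic()).  The helper name and GFP choice are
 * assumptions for the example.
 */
static struct sk_buff *example_build_payload(const void *payload,
					     unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	/* Safe: alloc_skb(len, ...) reserved exactly len bytes of tailroom. */
	memcpy(skb_put(skb, len), payload, len);
	return skb;
}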
		__skb_trim(skb, len);

	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;

	if (skb_cloned(skb) &&

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
	skb_shinfo(skb)->nr_frags = i;

	for (; i < nfrags; i++)
		skb_frag_unref(skb, i);

	if (skb_has_frag_list(skb))
		skb_drop_fraglist(skb);

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
		    unlikely((err = pskb_trim(frag, len - offset))))
			skb_drop_list(&frag->next);

	if (len > skb_headlen(skb)) {
		skb_set_tail_pointer(skb, len);
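/*
 * Illustrative sketch, not part of the original file: pskb_trim() (backed by
 * ___pskb_trim() above for non-linear skbs) is how callers drop trailing
 * bytes, e.g. padding beyond the length advertised in a header.  The helper
 * name is hypothetical.
 */
static int example_strip_padding(struct sk_buff *skb, unsigned int real_len)
{
	if (skb->len <= real_len)
		return 0;
	/* May return -ENOMEM if a cloned skb has to be reallocated first. */
	return pskb_trim(skb, real_len);
}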
	if (eat > 0 || skb_cloned(skb)) {
	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))

	if (!skb_has_frag_list(skb))

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		struct sk_buff *list = skb_shinfo(skb)->frag_list;

			if (list->len <= eat) {
			if (skb_shared(list)) {
				if (!pskb_pull(list, eat)) {

		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			skb_shinfo(skb)->frag_list = clone;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb_frag_unref(skb, i);
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			skb_shinfo(skb)->frags[k].page_offset += eat;
			skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
	skb_shinfo(skb)->nr_frags = k;

	return skb_tail_pointer(skb);
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)

	if ((copy = start - offset) > 0) {
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)

	skb_walk_frags(skb, frag_iter) {
		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)
static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
	struct page_frag *pfrag = sk_page_frag(sk);

			     unsigned int offset)

			  unsigned int *len, unsigned int offset,
	page = linear_to_page(page, len, &offset, skb, sk);
	if (spd_can_coalesce(spd, page, offset)) {

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
	*page = nth_page(*page, n);

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len, struct sk_buff *skb,
	__segment_seek(&page, &poff, &plen, *off);
		unsigned int flen = min(*len, plen);
		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
		__segment_seek(&page, &poff, &plen, flen);
	} while (*len && plen);

			      unsigned int *offset, unsigned int *len,
			     offset, len, skb, spd,
			     skb_head_is_locked(skb),

	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		if (__splice_segment(skb_frag_page(f),
				     offset, len, skb, spd, false, sk, pipe))

	.ops = &sock_pipe_buf_ops,
	.spd_release = sock_spd_release,

	struct sock *sk = skb->sk;

	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))

	skb_walk_frags(skb, frag_iter) {
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)

	if ((copy = start - offset) > 0) {
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)

	skb_walk_frags(skb, frag_iter) {
		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)
	int start = skb_headlen(skb);
		if ((len -= copy) == 0)

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
					     offset - start, copy, 0);
			csum = csum_block_add(csum, csum2, pos);

	skb_walk_frags(skb, frag_iter) {
		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)

	int start = skb_headlen(skb);
		if ((len -= copy) == 0)

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			csum = csum_block_add(csum, csum2, pos);

	skb_walk_frags(skb, frag_iter) {
		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
		csstart = skb_checksum_start_offset(skb);
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	if (csstart != skb->len)
					      skb->len - csstart, 0);
		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	unsigned long flags;

	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);

	unsigned long flags;

	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);

	unsigned long flags;
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);

	unsigned long flags;
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);

	unsigned long flags;
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);

	unsigned long flags;
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);

	unsigned long flags;
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
static inline void skb_split_inside_header(struct sk_buff *skb,
					   const u32 len, const int pos)
	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb_set_tail_pointer(skb, len);

static inline void skb_split_no_header(struct sk_buff *skb,
				       const u32 len, int pos)
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;

	for (i = 0; i < nfrags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
				skb_frag_ref(skb, i);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
				skb_shinfo(skb)->nr_frags++;
			skb_shinfo(skb)->nr_frags++;
	skb_shinfo(skb1)->nr_frags = k;

	int pos = skb_headlen(skb);
		skb_split_inside_header(skb, skb1, len, pos);
		skb_split_no_header(skb, skb1, len, pos);
static int skb_prepare_for_shift(struct sk_buff *skb)

	BUG_ON(skb_headlen(skb));

	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
		todo -= skb_frag_size(fragfrom);
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))

			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			skb_frag_size_add(fragto, shiftlen);
			skb_frag_size_sub(fragfrom, shiftlen);

	if ((shiftlen == skb->len) &&
	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= skb_frag_size(fragfrom)) {
			*fragto = *fragfrom;
			todo -= skb_frag_size(fragfrom);
			__skb_frag_ref(fragfrom);
			skb_frag_size_set(fragto, todo);
			skb_frag_size_sub(fragfrom, todo);

	skb_shinfo(tgt)->nr_frags = to;
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
		__skb_frag_unref(fragfrom);

	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

	skb->len -= shiftlen;
	tgt->len += shiftlen;
			  unsigned int to, struct skb_seq_state *st)
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;

			  struct skb_seq_state *st)
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;

	if (unlikely(abs_offset >= st->upper_offset))

	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = skb_frag_size(frag) + st->stepped_offset;

		if (abs_offset < block_limit) {
				(abs_offset - st->stepped_offset);
			return block_limit - abs_offset;

		if (st->frag_data) {
			st->frag_data = NULL;

		st->stepped_offset += skb_frag_size(frag);

	if (st->frag_data) {
		st->frag_data = NULL;

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
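/*
 * Illustrative sketch, not part of the original file: the seq-read API lets a
 * caller walk a non-linear skb chunk by chunk without linearizing it.  The
 * byte-counting helper is hypothetical.
 */
static unsigned int example_count_zero_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len, i, consumed = 0, zeros = 0;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			if (!data[i])
				zeros++;
		consumed += len;
	}
	skb_abort_seq_read(&st);	/* harmless after a complete walk */
	return zeros;
}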
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,

	config->finish = skb_ts_finish;

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
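/*
 * Illustrative sketch, not part of the original file: skb_find_text() drives a
 * textsearch algorithm over the skb through the seq-read state stashed in
 * ts_state->cb (see TS_SKB_CB above).  Assumes <linux/textsearch.h> and a
 * built-in "kmp" algorithm; the helper name is hypothetical.
 */
static bool example_payload_contains(struct sk_buff *skb, const char *needle)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", needle, strlen(needle),
				  GFP_ATOMIC, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;
	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}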
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
	struct page *page = NULL;

	frg_cnt = skb_shinfo(skb)->nr_frags;

		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);

		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
		copy = (length > left) ? left : length;

		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
			      offset, copy, 0, skb);
		skb_frag_size_add(frag, copy);
	} while (length > 0);

	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	int nfrags = skb_shinfo(skb)->nr_frags;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

		hsize = skb_headlen(skb) - offset;
		if (hsize > len || !sg)

		if (!hsize && i >= nfrags) {
			hsize = skb_end_offset(nskb);
			if (skb_cow_head(nskb, doffset + headroom)) {

			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);

		__copy_skb_header(nskb, skb);
		nskb->csum_start += skb_headroom(nskb) - headroom;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
					 skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			__skb_frag_ref(frag);
			size = skb_frag_size(frag);
				skb_frag_size_sub(frag, offset - pos);

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				skb_frag_size_sub(frag, pos + size - (offset + len));

		if (pos < offset + len) {
			BUG_ON(pos + fskb->len != offset + len);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;

	} while ((offset += len) < skb->len);

	while ((skb = segs)) {
	return ERR_PTR(err);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int delta_truesize;

	if (p->len + len >= 65536)

	else if (headlen <= offset) {
		frag2 = skbinfo->frags + i;
		skb_frag_size_sub(frag, offset);

		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		first_offset = skb->data -
		skb_frag_size_set(frag, first_size);
	} else if (skb_gro_len(p) != pinfo->gso_size)

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);

	__copy_skb_header(nskb, p);

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	skb_header_release(p);

	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_size_sub(&skbinfo->frags[0], eat);

	__skb_pull(skb, offset);
	skb_header_release(skb);
	int start = skb_headlen(skb);
		sg_set_buf(sg, skb->data + offset, copy);
		if ((len -= copy) == 0)

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			sg_set_page(&sg[elt], skb_frag_page(frag), copy,

	skb_walk_frags(skb, frag_iter) {
		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			elt += __skb_to_sgvec(frag_iter, sg + elt, offset - start,
			if ((len -= copy) == 0)

	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);
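/*
 * Illustrative sketch, not part of the original file: skb_to_sgvec() is how
 * IPsec/crypto code hands a possibly fragmented skb payload to the
 * scatterlist-based crypto API.  The caller must size sg for the worst case;
 * the helper name and nelems parameter are assumptions.
 */
static int example_map_payload(struct sk_buff *skb, struct scatterlist *sg,
			       unsigned int nelems)
{
	sg_init_table(sg, nelems);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}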
	struct sk_buff *skb1, **skb_p;

	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&

	if (!skb_has_frag_list(skb)) {
		if (skb_tailroom(skb) < tailbits &&

	skb_p = &skb_shinfo(skb)->frag_list;

	while ((skb1 = *skb_p) != NULL) {
		if (skb_shared(skb1))

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;

		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {

			skb_set_owner_w(skb2, skb1->sk);

		skb_p = &skb1->next;

static void sock_rmem_free(struct sk_buff *skb)
	struct sock *sk = skb->sk;

	struct sock *sk = orig_skb->sk;
		*skb_hwtstamps(skb) =
	memset(serr, 0, sizeof(*serr));

	struct sock *sk = skb->sk;
	memset(serr, 0, sizeof(*serr));

	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		     start, off, skb_headlen(skb));
	skb_release_head_state(skb);

		      bool *fragstolen, int *delta_truesize)
	*fragstolen = false;

	if (len <= skb_tailroom(to)) {
		*delta_truesize = 0;

	if (skb_has_frag_list(to) || skb_has_frag_list(from))

	if (skb_headlen(from) != 0) {
		if (skb_shinfo(to)->nr_frags +

		if (skb_head_is_locked(from))

		page = virt_to_head_page(from->head);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));

		if (skb_shinfo(to)->nr_frags +

		memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
		       skb_shinfo(from)->frags,
		       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
		skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

		if (!skb_cloned(from))
			skb_shinfo(from)->nr_frags = 0;

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	*delta_truesize = delta;