#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/slab.h>
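/*
 * connection_based - true for connection-oriented socket types
 * (SOCK_SEQPACKET, SOCK_STREAM); used below to decide how receive
 * waits and poll react to socket state changes.
 */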
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
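/*
 * receiver_wake_function(): the waker passes the poll event mask as the
 * wait key; decode it so a sleeping reader is only woken for the events
 * it actually cares about (input or error).
 */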
	unsigned long bits = (unsigned long)key;
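/*
 * wait_for_packet - sleep until data lands on sk's receive queue, a
 * pending socket error is found, or the timeout in *timeo_p expires.
 * Returns 0 when the caller should re-check the queue, nonzero to stop
 * the receive loop (with any error code stored through *err).
 */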
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
	error = sock_error(sk);
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;
	error = sock_intr_errno(*timeo_p);
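/*
 * __skb_recv_datagram - dequeue (or, with MSG_PEEK, peek at) the next
 * datagram. Loops over "check the queue, then wait_for_packet()" until
 * an skb arrives, an error occurs, or the timeout runs out; *off lets
 * peeking callers resume part-way into the queue.
 */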
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
	int error = sock_error(sk);
		unsigned long cpu_flags;

		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (*off >= skb->len) {
					*off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
	} while (!wait_for_packet(sk, err, &timeo));
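/*
 * skb_recv_datagram - convenience wrapper that folds the noblock
 * argument into the flags and calls __skb_recv_datagram().
 */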
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
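/*
 * skb_free_datagram()/skb_free_datagram_locked(): drop a datagram the
 * caller has finished with and return forward-allocated receive memory
 * to the socket accounting; the locked variant takes the socket lock
 * with lock_sock_fast()/unlock_sock_fast() around the reclaim.
 */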
	sk_mem_reclaim_partial(sk);

	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);
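/*
 * skb_kill_datagram(): unlink and free a received datagram that turned
 * out to be unusable (e.g. a bad UDP checksum discovered mid-copy),
 * again reclaiming any partially charged receive memory.
 */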
	sk_mem_reclaim_partial(sk);
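/*
 * skb_copy_datagram_iovec - copy a datagram to a user iovec in three
 * stages: the linear header (skb_headlen() bytes), then the paged
 * fragments, then any chained skbs on the frag list (recursively).
 */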
	int start = skb_headlen(skb);

	trace_skb_copy_datagram_iovec(skb, len);

		if ((len -= copy) == 0)
			return 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
	skb_walk_frags(skb, frag_iter) {
		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)
				return 0;
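/*
 * A minimal sketch of how a protocol's recvmsg path typically strings
 * the helpers above together. my_proto_recvmsg() is hypothetical, not
 * part of this file; the call sequence mirrors real datagram users.
 */
static int my_proto_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	/* Sleep (unless MSG_DONTWAIT) until a datagram is queued. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (copied > len) {
		copied = len;			/* short read: truncate */
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Linear header, page frags and frag list all handled in one call. */
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	skb_free_datagram(sk, skb);
	return err ? err : copied;
}

/*
 * skb_copy_datagram_const_iovec(): the same three-stage walk, but the
 * iovec itself is never advanced; progress is tracked through the
 * explicit to_offset argument, so the caller can reuse the iovec.
 */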
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
		if ((len -= copy) == 0)
			return 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
	skb_walk_frags(skb, frag_iter) {
		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)
				return 0;
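/*
 * skb_copy_datagram_from_iovec(): the transmit-side mirror image; it
 * fills an skb's linear area, page frags and frag list from a user
 * iovec, starting at an explicit from_offset.
 */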
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
		if ((len -= copy) == 0)
			return 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
	skb_walk_frags(skb, frag_iter) {
		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if ((len -= copy) == 0)
				return 0;
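/*
 * skb_copy_and_csum_datagram(): like the plain copy, but folds a 16-bit
 * Internet checksum over the bytes as they are copied, combining the
 * per-chunk results with csum_block_add() at the right byte position.
 */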
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
		if ((len -= copy) == 0)
			return 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			csum2 = csum_and_copy_to_user(vaddr + frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			*csump = csum_block_add(*csump, csum2, pos);
	skb_walk_frags(skb, frag_iter) {
		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy, &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
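/*
 * When a device claimed CHECKSUM_COMPLETE yet software verification
 * disagrees, netdev_rx_csum_fault() logs the offending device so broken
 * hardware checksum offload can be spotted.
 */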
			netdev_rx_csum_fault(skb->dev);
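/*
 * skb_copy_and_csum_datagram_iovec - copy and verify in one pass. The
 * first hlen bytes are not copied to the iovec (the caller has already
 * consumed the header) but still count toward the checksum; UDP, for
 * instance, passes sizeof(struct udphdr) here.
 */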
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
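/*
 * datagram_poll - generic poll/select/epoll handler for datagram
 * sockets: registers on the socket's wait queue, then reports
 * exceptional, readable and writable state as a level-triggered mask.
 */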
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (connection_based(sk)) {
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
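/*
 * Sketch of how a datagram protocol typically publishes these helpers
 * through its proto_ops. The my_* handlers (and the choice of
 * PF_PACKET) are assumptions for illustration; datagram_poll and the
 * sock_no_* stubs are the real kernel symbols.
 */
static const struct proto_ops my_dgram_ops = {
	.family		= PF_PACKET,		/* example family (assumption) */
	.owner		= THIS_MODULE,
	.release	= my_release,		/* hypothetical */
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= my_getname,		/* hypothetical */
	.poll		= datagram_poll,	/* helper shown above */
	.ioctl		= my_ioctl,		/* hypothetical */
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= my_sendmsg,		/* hypothetical */
	.recvmsg	= my_proto_recvmsg,	/* see the earlier sketch */
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};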