#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"
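/* Housekeeping once an Ack-bearing packet has been handed to the network
 * layer: clears any pending delayed-ACK state on the connection socket. */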
static inline void dccp_event_ack_sent(struct sock *sk)
	skb_set_owner_w(skb, sk);
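/*
 * All sk_buffs seen here are completely headerless: the job of this function
 * is to build the DCCP header and pass the packet down to the inet layer,
 * which adds the network header and hands it to the device.
 */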
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
	const u32 dccp_header_size = sizeof(*dh) +
				     sizeof(struct dccp_hdr_ext) +
				     dccp_packet_hdr_len(dcb->dccpd_type);
	skb_set_owner_w(skb, sk);

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dccp_event_ack_sent(sk);
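/* CCMPS: the CCID-specific maximum packet size. A value of 0 means the
 * current tx CCID imposes no limit of its own. */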
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
	return tx_ccid->ccid_ops->ccid_ccmps;
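/* MPS calculation (dccp_sync_mss() in the full file): the usable packet size
 * is the path MTU, further capped by the CCID's CCMPS when one is set, minus
 * the fixed network- and DCCP-header overhead. */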
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));
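/* Wake any task sleeping on the socket wait queue, and notify async waiters,
 * once the socket has become writeable again. */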
	if (wq_has_sleeper(wq))

	if (sock_writeable(sk))
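/* Sleep for at most @delay jiffies until the CCID gives permission to send the
 * next packet; returns the number of jiffies remaining from @delay, or a
 * negative value on error or signal. */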
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
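/* Dequeue the next payload from the transmit queue, choose the packet type
 * (DATA vs. DATAACK, with extra handling during the PARTOPEN handshake phase),
 * and hand it to dccp_transmit_skb(). */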
static void dccp_xmit_packet(struct sock *sk)
		DCCP_WARN("Payload too large (%d) for featneg.\n", len);
		inet_csk_schedule_ack(sk);
					  inet_csk(sk)->icsk_rto,

	} else if (dccp_ack_pending(sk)) {

	err = dccp_transmit_skb(sk, skb);
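/* Drain the write queue at the end of the connection, within *time_budget
 * jiffies: for each queued skb ask the CCID whether it may be sent now,
 * transmit it or wait as directed, and charge any wait time against the
 * remaining budget. */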
	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		switch (ccid_packet_dequeue_eval(rc)) {
			DCCP_WARN("CCID did not manage to send all packets\n");

			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			*time_budget -= (delay - rc);

			dccp_xmit_packet(sk);
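/* Regular transmit path: evaluate the CCID's verdict for the next queued
 * packet and either send it immediately, defer it until the CCID allows
 * sending, or discard it on error. */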
		switch (ccid_packet_dequeue_eval(rc)) {
			dccp_xmit_packet(sk);
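/* Retransmit the packet saved in sk_send_head: refresh the af-specific header
 * and routing state first, and count the attempt in icsk_retransmits. */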
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)

	inet_csk(sk)->icsk_retransmits++;
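/* Build the RESPONSE packet answering a client's REQUEST (dccp_make_response()
 * in the full file): allocate and reserve an skb, fill in the DCCP header from
 * the request's sequence and service data, and checksum the result. */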
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	skb_reserve(skb, sk->sk_prot->max_header);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_gss);

		goto response_failed;

		goto response_failed;

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	inet_rsk(req)->acked = 1;
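/* RESET packets use a minimal template: generic header, extended sequence
 * numbers, and the reset-specific header carrying the reset code. */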
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	skb_reserve(skb, sk->sk_prot->max_header);

	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);

	dhr = dccp_hdr_reset(skb);

	dccp_csum_outgoing(skb);
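/* Send a RESET on an established socket to close or abort the connection; the
 * af-specific header state is rebuilt before the packet is transmitted. */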
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	skb_reserve(skb, sk->sk_prot->max_header);

	return dccp_transmit_skb(sk, skb);
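/* Connection setup: the initial REQUEST is queued on sk_send_head via
 * dccp_skb_entail() so that it can be retransmitted, and a clone is sent. */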
	skb_reserve(skb, sk->sk_prot->max_header);

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
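/* Send a plain Ack. If no skb can be allocated, fall back to the delayed-ACK
 * timer so the Ack is retried from timer context instead of being lost. */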
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
					GFP_ATOMIC);

	inet_csk_schedule_ack(sk);

	skb_reserve(skb, sk->sk_prot->max_header);

	dccp_transmit_skb(sk, skb);
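/* Schedule an Ack for later instead of sending one immediately, by (re)arming
 * the delayed-ACK timer. */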
void dccp_send_delayed_ack(struct sock *sk)
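/* Send a Sync-type packet carrying the requested sequence number; any
 * previously scheduled Sync is now satisfied, so dccps_sync_scheduled is
 * cleared before transmission. */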
	skb_reserve(skb, sk->sk_prot->max_header);

	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
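/* Send a CLOSE/CLOSEREQ with the caller-supplied allocation priority; when the
 * packet must be retransmittable it is entailed on sk_send_head and a clone is
 * transmitted, mirroring the REQUEST path above. */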
	skb = alloc_skb(sk->sk_prot->max_header, prio);

	skb_reserve(skb, sk->sk_prot->max_header);

	skb = dccp_skb_entail(sk, skb);

	dccp_transmit_skb(sk, skb);