#include <linux/slab.h>
#include <linux/types.h>
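/* Forward declaration: sctp_ulpq_reasm_drain() is defined further down
 * but is already needed by sctp_ulpq_clear_pd().
 */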
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
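/* A ULP queue holds two sk_buff lists: "reasm" for TSN-based fragment
 * reassembly and "lobby" for per-stream SSN ordering.
 */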
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
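/* Flushing drains both lists, converting each skb back to its ulpevent
 * so the event (and its skb) can be freed.
 */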
        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
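/* Inbound DATA takes two steps: reassemble fragments first, then run
 * stream ordering; a temporary list carries the event between stages.
 */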
        event = sctp_ulpq_reasm(ulpq, event);

        skb_queue_head_init(&temp);
        __skb_queue_tail(&temp, sctp_event2skb(event));

        event = sctp_ulpq_order(ulpq, event);
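/* Leaving partial delivery at the socket level: if no association is
 * still in PD mode, the whole pd_lobby can be reset in one shot;
 * otherwise only events belonging to the association exiting PD are
 * pulled out.
 */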
        if (!skb_queue_empty(&sp->pd_lobby)) {
                list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                INIT_LIST_HEAD(list);

        if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                event = sctp_skb2event(skb);
                if (event->asoc == asoc) {
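/* Per-queue PD state: set_pd marks the ulpq as being in partial
 * delivery; clear_pd drains the reassembly queue before leaving it.
 */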
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)

static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
        sctp_ulpq_reasm_drain(ulpq);
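/* Delivery to the ULP: events the user has not subscribed to are
 * dropped. While the socket is in partial delivery, data is parked in
 * pd_lobby (unless fragment interleave permits direct delivery), and an
 * event flagged MSG_EOR ends PD mode via sctp_ulpq_clear_pd().
 */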
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))

        queue = &sctp_sk(sk)->pd_lobby;

        clear_pd = event->msg_flags & MSG_EOR;

        if (sctp_sk(sk)->frag_interleave)

        queue = &sctp_sk(sk)->pd_lobby;

        sctp_skb_list_tail(skb_list, queue);

        __skb_queue_tail(queue, skb);

        sctp_ulpq_clear_pd(ulpq);
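/* Store a fragment on the reassembly queue, kept sorted by TSN. The
 * common case is in-order arrival, so the tail is checked first before
 * falling back to a linear walk for the insertion point.
 */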
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,

        pos = skb_peek_tail(&ulpq->reasm);

        __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));

        cevent = sctp_skb2event(pos);

        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);

                if (TSN_lt(tsn, ctsn))

        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
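/* Build one reassembled event from the fragments between f_frag and
 * l_frag: the first skb becomes the head (cloned if shared) and the
 * remaining fragments are chained onto its frag_list before being
 * unlinked from the queue.
 */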
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        if (f_frag == l_frag)

        for (last = list; list; last = list, list = list->next);

        if (skb_cloned(f_frag)) {

        sctp_skb_set_owner_r(new, f_frag->sk);

        skb_shinfo(new)->frag_list = pos;

        skb_shinfo(f_frag)->frag_list = pos;

        __skb_unlink(f_frag, queue);

        __skb_unlink(pos, queue);

        event = sctp_skb2event(f_frag);
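/* Check whether a complete message now sits at the front of the reasm
 * queue. With fragment interleave off and a partial-delivery point
 * configured, accumulating pd_point bytes of contiguous data triggers
 * partial delivery instead of waiting for the full message.
 */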
        __u32 ctsn, next_tsn;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);

                if (pos == ulpq->reasm.next) {

                if ((first_frag) && (ctsn == next_tsn)) {

                if (first_frag && (ctsn == next_tsn))

        if (!sctp_sk(asoc->base.sk)->frag_interleave &&

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),

                sctp_ulpq_set_pd(ulpq);

        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, pos);
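/* While in partial delivery mode, hand up the next run of contiguous
 * fragments (tracked through next_tsn) without waiting for the whole
 * message to arrive.
 */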
        __u32 ctsn, next_tsn;

        if (skb_queue_empty(&ulpq->reasm))

        last_frag = first_frag = NULL;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);

                } else if (next_tsn == ctsn)

                else if (ctsn != next_tsn)

        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
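/* Top-level reassembly: store the new fragment, try to retrieve a
 * complete message, and when in PD mode retry a partial retrieve once
 * the fragment's TSN falls within the cumulative TSN ACK point.
 */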
        sctp_ulpq_store_reasm(ulpq, event);

        retval = sctp_ulpq_retrieve_reassembled(ulpq);

        ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
        if (TSN_lte(ctsn, ctsnap))
                retval = sctp_ulpq_retrieve_partial(ulpq);
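/* Retrieve the leading run of contiguous fragments of the first
 * message on the reassembly queue, used to start partial delivery.
 */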
        __u32 ctsn, next_tsn;

        if (skb_queue_empty(&ulpq->reasm))

        last_frag = first_frag = NULL;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);

                if (ctsn == next_tsn) {

        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, last_frag);
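/* FORWARD TSN processing (RFC 3758): fragments whose TSN falls at or
 * below the new cumulative point are stale, so unlink and free them.
 */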
        if (skb_queue_empty(&ulpq->reasm))

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);

                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
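/* Drain the reassembly queue: repeatedly pull out whatever now forms a
 * complete message and push it through ordering, mirroring the normal
 * delivery path.
 */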
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)

        if (skb_queue_empty(&ulpq->reasm))

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {

                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
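/* After delivering an in-order event, sweep the lobby for events on the
 * same stream whose SSN has become the expected one, advancing the
 * expected SSN as each is collected onto event_list.
 */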
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,

        in = &ulpq->asoc->ssnmap->in;

                if (cssn != sctp_ssn_peek(in, sid))

                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                __skb_queue_tail(event_list, pos);
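/* Store an out-of-order event in the lobby, sorted by stream id and
 * then SSN; as with reassembly, the tail is tried first before walking
 * the queue for the insertion point.
 */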
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,

        pos = skb_peek_tail(&ulpq->lobby);

        __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));

        __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));

        skb_queue_walk(&ulpq->lobby, pos) {

                if (csid == sid && SSN_lt(ssn, cssn))

        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
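/* Ordering entry point: an event whose SSN is not the next expected one
 * for its stream is parked in the lobby; otherwise the expected SSN
 * advances and any newly ordered followers are gathered as well.
 */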
        in = &ulpq->asoc->ssnmap->in;

        if (ssn != sctp_ssn_peek(in, sid)) {
                sctp_ulpq_store_ordered(ulpq, event);

        sctp_ssn_next(in, sid);

        sctp_ulpq_retrieve_ordered(ulpq, event);
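/* Reap the lobby after an SSN skip: deliverable events are collected on
 * a temporary list, and a second check advances the expected SSN when
 * the next queued event matches it exactly.
 */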
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)

        in = &ulpq->asoc->ssnmap->in;

        skb_queue_head_init(&temp);

                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))

                __skb_unlink(pos, lobby);

                event = sctp_skb2event(pos);

                __skb_queue_tail(&temp, pos);

        if (event == NULL && pos != (struct sk_buff *)lobby) {

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);

        sctp_ulpq_retrieve_ordered(ulpq, event);
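/* Skip SSNs up to 'ssn' on stream 'sid' during FORWARD TSN processing;
 * skips that are already in the past are ignored, then the lobby is
 * reaped for events the skip has unblocked.
 */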
        in = &ulpq->asoc->ssnmap->in;

        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))

        sctp_ssn_skip(in, sid, ssn);

        sctp_ulpq_reap_ordered(ulpq, sid);
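/* Renege: free events from the tail of the given list (newest data
 * first) until 'needed' bytes are reclaimed; the corresponding TSNs
 * must also be forgotten in the peer's tsn_map.
 */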
        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = __skb_dequeue_tail(list)) != NULL) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
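/* Convenience wrappers: renege from the ordering lobby or from the
 * reassembly queue.
 */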
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);

        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
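/* Under receive-window pressure, start partial delivery of the first
 * partially reassembled message so the ULP can begin consuming data.
 */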
        sp = sctp_sk(asoc->base.sk);

        event = sctp_ulpq_retrieve_first(ulpq);

        sctp_ulpq_set_pd(ulpq);
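/* Make room for an incoming chunk: renege only if nothing is already
 * queued to the user, trying the ordering queue before fragments, and
 * deliver the chunk if enough bytes were freed.
 */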
        __u16 needed, freed;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed) {
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);

        if (chunk && (freed >= needed)) {

        sk_mem_reclaim(asoc->base.sk);
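/* Abort partial delivery: if the user subscribed to partial-delivery
 * notifications, send one before clearing PD state.
 */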
        sk = ulpq->asoc->base.sk;

                                       &sctp_sk(sk)->subscribe))

        if (sctp_ulpq_clear_pd(ulpq) || ev)