Commit d003b41b801124b96337973b01eada6a83673d23

Authored by Lee A. Roberts
Committed by David S. Miller
1 parent 95ac7b859f

sctp: fix association hangs due to partial delivery errors

In sctp_ulpq_tail_data(), use return values 0 and 1 to indicate
whether a complete event (with MSG_EOR set) was delivered.  A return
value of -ENOMEM continues to indicate that an out-of-memory
condition was encountered.

In sctp_ulpq_retrieve_partial() and sctp_ulpq_retrieve_first(),
correct message reassembly logic for SCTP partial delivery.
Change logic to ensure that as much data as possible is sent
with the initial partial delivery and that following partial
deliveries contain all available data.

In sctp_ulpq_partial_delivery(), attempt partial delivery only
if the data on the head of the reassembly queue is at or before
the cumulative TSN ACK point.

In sctp_ulpq_renege(), use the modified return values from
sctp_ulpq_tail_data() to choose whether to attempt partial
delivery or to attempt to drain the reassembly queue as a
means to reduce memory pressure.  Remove call to
sctp_tsnmap_mark(), as this is handled correctly in call to
sctp_ulpq_tail_data().

Signed-off-by: Lee A. Roberts <lee.roberts@hp.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>

Showing 1 changed file with 43 additions and 11 deletions Side-by-side Diff

... ... @@ -106,6 +106,7 @@
106 106 {
107 107 struct sk_buff_head temp;
108 108 struct sctp_ulpevent *event;
  109 + int event_eor = 0;
109 110  
110 111 /* Create an event from the incoming chunk. */
111 112 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
112 113  
113 114  
... ... @@ -127,10 +128,12 @@
127 128 /* Send event to the ULP. 'event' is the sctp_ulpevent for
128 129 * very first SKB on the 'temp' list.
129 130 */
130   - if (event)
  131 + if (event) {
  132 + event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
131 133 sctp_ulpq_tail_event(ulpq, event);
  134 + }
132 135  
133   - return 0;
  136 + return event_eor;
134 137 }
135 138  
136 139 /* Add a new event for propagation to the ULP. */
137 140  
138 141  
... ... @@ -540,14 +543,19 @@
540 543 ctsn = cevent->tsn;
541 544  
542 545 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
  546 + case SCTP_DATA_FIRST_FRAG:
  547 + if (!first_frag)
  548 + return NULL;
  549 + goto done;
543 550 case SCTP_DATA_MIDDLE_FRAG:
544 551 if (!first_frag) {
545 552 first_frag = pos;
546 553 next_tsn = ctsn + 1;
547 554 last_frag = pos;
548   - } else if (next_tsn == ctsn)
  555 + } else if (next_tsn == ctsn) {
549 556 next_tsn++;
550   - else
  557 + last_frag = pos;
  558 + } else
551 559 goto done;
552 560 break;
553 561 case SCTP_DATA_LAST_FRAG:
... ... @@ -651,6 +659,14 @@
651 659 } else
652 660 goto done;
653 661 break;
  662 +
  663 + case SCTP_DATA_LAST_FRAG:
  664 + if (!first_frag)
  665 + return NULL;
  666 + else
  667 + goto done;
  668 + break;
  669 +
654 670 default:
655 671 return NULL;
656 672 }
657 673  
658 674  
... ... @@ -1025,16 +1041,28 @@
1025 1041 struct sctp_ulpevent *event;
1026 1042 struct sctp_association *asoc;
1027 1043 struct sctp_sock *sp;
  1044 + __u32 ctsn;
  1045 + struct sk_buff *skb;
1028 1046  
1029 1047 asoc = ulpq->asoc;
1030 1048 sp = sctp_sk(asoc->base.sk);
1031 1049  
1032 1050 /* If the association is already in Partial Delivery mode
1033   - * we have noting to do.
  1051 + * we have nothing to do.
1034 1052 */
1035 1053 if (ulpq->pd_mode)
1036 1054 return;
1037 1055  
  1056 + /* Data must be at or below the Cumulative TSN ACK Point to
  1057 + * start partial delivery.
  1058 + */
  1059 + skb = skb_peek(&asoc->ulpq.reasm);
  1060 + if (skb != NULL) {
  1061 + ctsn = sctp_skb2event(skb)->tsn;
  1062 + if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
  1063 + return;
  1064 + }
  1065 +
1038 1066 /* If the user enabled fragment interleave socket option,
1039 1067 * multiple associations can enter partial delivery.
1040 1068 * Otherwise, we can only enter partial delivery if the
... ... @@ -1077,12 +1105,16 @@
1077 1105 }
1078 1106 /* If able to free enough room, accept this chunk. */
1079 1107 if (chunk && (freed >= needed)) {
1080   - __u32 tsn;
1081   - tsn = ntohl(chunk->subh.data_hdr->tsn);
1082   - sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
1083   - sctp_ulpq_tail_data(ulpq, chunk, gfp);
1084   -
1085   - sctp_ulpq_partial_delivery(ulpq, gfp);
  1108 + int retval;
  1109 + retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
  1110 + /*
  1111 + * Enter partial delivery if chunk has not been
  1112 + * delivered; otherwise, drain the reassembly queue.
  1113 + */
  1114 + if (retval <= 0)
  1115 + sctp_ulpq_partial_delivery(ulpq, gfp);
  1116 + else if (retval == 1)
  1117 + sctp_ulpq_reasm_drain(ulpq);
1086 1118 }
1087 1119  
1088 1120 sk_mem_reclaim(asoc->base.sk);