Commit 672e7cca17ed6036a1756ed34cf20dbd72d5e5f6
Committed by: David S. Miller
Parent: 7c3ceb4fb9
Exists in: master and 7 other branches
[SCTP]: Prevent possible infinite recursion with multiple bundled DATA.
There is a rare situation that causes lksctp to go into infinite recursion and crash the system. The trigger is a packet that contains at least the first two DATA fragments of a message bundled together. The recursion is triggered when the user data buffer is smaller than the full data message.

The problem is that we clone the skb for every fragment in the message. When reassembling the full message, we try to link skbs from the "first fragment" clone using the frag_list. However, since the frag_list is shared between two clones in this rare situation, we end up setting the frag_list pointer of the second fragment to point to itself. This causes sctp_skb_pull() to potentially recurse indefinitely.

Proposed solution is to make a copy of the skb when attempting to link things using frag_list.

Signed-off-by: Vladislav Yasevich <vladsilav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
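For readers not steeped in skb internals, here is a minimal userspace-only sketch of how the shared frag_list ends up pointing at itself. The names used (buf, shared_info, clone_buf) are illustrative stand-ins, not kernel APIs; the kernel analogue is skb_clone() leaving both clones pointing at the same skb_shared_info.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for sk_buff and skb_shared_info. */
struct buf;
struct shared_info {
        struct buf *frag_list;          /* head of the fragment chain */
};
struct buf {
        const char *name;
        struct shared_info *shinfo;     /* clones share this, like skb_clone() */
};

/* "Cloning" duplicates only the buf header; the shared_info stays shared. */
static struct buf *clone_buf(struct buf *orig, const char *name)
{
        struct buf *c = malloc(sizeof(*c));
        if (!c)
                return NULL;
        c->name = name;
        c->shinfo = orig->shinfo;
        return c;
}

int main(void)
{
        struct shared_info shinfo = { .frag_list = NULL };
        struct buf packet = { .name = "packet", .shinfo = &shinfo };

        /* Two bundled DATA fragments arrive in one packet: both are clones. */
        struct buf *frag1 = clone_buf(&packet, "frag1");
        struct buf *frag2 = clone_buf(&packet, "frag2");
        if (!frag1 || !frag2)
                return 1;

        /* Reassembly links frag2 behind frag1 via frag1's frag_list ... */
        frag1->shinfo->frag_list = frag2;

        /* ... but frag2 shares that shared_info, so it now points at itself
         * and any walker following frag_list (like sctp_skb_pull()) loops. */
        printf("frag2's frag_list points back at frag2: %s\n",
               frag2->shinfo->frag_list == frag2 ? "yes" : "no");

        free(frag1);
        free(frag2);
        return 0;
}

The patch below avoids this by taking a private copy of the head fragment (skb_copy()) whenever it is still cloned, so writing its frag_list cannot be observed through the other clone.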
Showing 1 changed file with 25 additions and 2 deletions
net/sctp/ulpqueue.c
@@ -279,6 +279,7 @@
 static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
         struct sk_buff *pos;
+        struct sk_buff *new = NULL;
         struct sctp_ulpevent *event;
         struct sk_buff *pnext, *last;
         struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
 
 
@@ -297,11 +298,33 @@
          */
         if (last)
                 last->next = pos;
-        else
-                skb_shinfo(f_frag)->frag_list = pos;
+        else {
+                if (skb_cloned(f_frag)) {
+                        /* This is a cloned skb, we can't just modify
+                         * the frag_list. We need a new skb to do that.
+                         * Instead of calling skb_unshare(), we'll do it
+                         * ourselves since we need to delay the free.
+                         */
+                        new = skb_copy(f_frag, GFP_ATOMIC);
+                        if (!new)
+                                return NULL;    /* try again later */
 
+                        new->sk = f_frag->sk;
+
+                        skb_shinfo(new)->frag_list = pos;
+                } else
+                        skb_shinfo(f_frag)->frag_list = pos;
+        }
+
         /* Remove the first fragment from the reassembly queue. */
         __skb_unlink(f_frag, queue);
+
+        /* if we did unshare, then free the old skb and re-assign */
+        if (new) {
+                kfree_skb(f_frag);
+                f_frag = new;
+        }
+
         while (pos) {
 
                 pnext = pos->next;
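A note on the ordering in the patch: the original f_frag, not the copy, is the skb actually linked on the reassembly queue, so it is freed only after __skb_unlink() has removed it; that is the "delay the free" the in-code comment refers to. The sketch below, again with hypothetical userspace names rather than kernel APIs, shows the fixed linking path: give the head fragment a private shared_info before touching frag_list, so the second fragment's view of frag_list stays untouched.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical userspace stand-ins; 'users > 1' plays the role of skb_cloned(). */
struct buf;
struct shared_info {
        struct buf *frag_list;
        int users;
};
struct buf {
        struct shared_info *shinfo;
};

/* Analogue of the skb_copy() taken in the patch: a head with a private shared_info. */
static struct buf *private_copy(const struct buf *head)
{
        struct buf *copy = malloc(sizeof(*copy));
        struct shared_info *si = malloc(sizeof(*si));

        if (!copy || !si) {
                free(copy);
                free(si);
                return NULL;            /* caller retries later, as in the patch */
        }
        *si = *head->shinfo;
        si->users = 1;
        copy->shinfo = si;
        return copy;
}

int main(void)
{
        struct shared_info shared = { .frag_list = NULL, .users = 2 };
        struct buf head = { .shinfo = &shared };        /* first bundled fragment  */
        struct buf next = { .shinfo = &shared };        /* second bundled fragment */
        struct buf *linked = &head;

        if (head.shinfo->users > 1) {                   /* i.e. "skb_cloned()" */
                linked = private_copy(&head);
                if (!linked)
                        return 1;
        }
        linked->shinfo->frag_list = &next;              /* safe: private shared_info */

        printf("next's frag_list points back at next: %s\n",
               next.shinfo->frag_list == &next ? "yes" : "no");

        if (linked != &head) {
                free(linked->shinfo);
                free(linked);
        }
        return 0;
}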