Commit cc9d2566986692e5b6df1aac88fbf65fe340d432
Committed by
David S. Miller
1 parent
926bdeab55
mptcp: update per unacked sequence on pkt reception
So that we keep per unacked sequence number consistent; since we update per msk data, use an atomic64 cmpxchg() to protect against concurrent updates from multiple subflows. Initialize the snd_una at connect()/accept() time. Co-developed-by: Florian Westphal <fw@strlen.de> Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 3 changed files with 49 additions and 6 deletions Side-by-side Diff
net/mptcp/options.c
... | ... | @@ -744,6 +744,46 @@ |
744 | 744 | return true; |
745 | 745 | } |
746 | 746 | |
747 | +static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit) | |
748 | +{ | |
749 | + u32 old_ack32, cur_ack32; | |
750 | + | |
751 | + if (use_64bit) | |
752 | + return cur_ack; | |
753 | + | |
754 | + old_ack32 = (u32)old_ack; | |
755 | + cur_ack32 = (u32)cur_ack; | |
756 | + cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32; | |
757 | + if (unlikely(before(cur_ack32, old_ack32))) | |
758 | + return cur_ack + (1LL << 32); | |
759 | + return cur_ack; | |
760 | +} | |
761 | + | |
/* Advance msk->snd_una from a DSS ack received on any subflow.
 *
 * Runs in (soft)irq/receive context, possibly concurrently on several
 * subflows of the same msk, hence the lock-free atomic64 cmpxchg loop.
 */
static void update_una(struct mptcp_sock *msk,
		       struct mptcp_options_received *mp_opt)
{
	u64 new_snd_una, snd_una, old_snd_una = atomic64_read(&msk->snd_una);
	u64 write_seq = READ_ONCE(msk->write_seq);

	/* avoid ack expansion on update conflict, to reduce the risk of
	 * wrongly expanding to a future ack sequence number, which is way
	 * more dangerous than missing an ack
	 */
	new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);

	/* ACK for data not even sent yet? Ignore. */
	if (after64(new_snd_una, write_seq))
		new_snd_una = old_snd_una;

	/* retry the update until either we win the race or some other
	 * subflow has already advanced snd_una past our value; note the
	 * ack is deliberately NOT re-expanded on conflict (see above)
	 */
	while (after64(new_snd_una, old_snd_una)) {
		snd_una = old_snd_una;
		/* cmpxchg returns the previous value: equality with our
		 * snapshot means the store succeeded
		 */
		old_snd_una = atomic64_cmpxchg(&msk->snd_una, snd_una,
					       new_snd_una);
		if (old_snd_una == snd_una)
			break;
	}
}
786 | + | |
747 | 787 | static bool add_addr_hmac_valid(struct mptcp_sock *msk, |
748 | 788 | struct mptcp_options_received *mp_opt) |
749 | 789 | { |
... | ... | @@ -805,6 +845,12 @@ |
805 | 845 | if (!mp_opt->dss) |
806 | 846 | return; |
807 | 847 | |
848 | + /* we can't wait for recvmsg() to update the ack_seq, otherwise | |
849 | + * monodirectional flows will get stuck | |
850 | + */ | |
851 | + if (mp_opt->use_ack) | |
852 | + update_una(msk, mp_opt); | |
853 | + | |
808 | 854 | mpext = skb_ext_add(skb, SKB_EXT_MPTCP); |
809 | 855 | if (!mpext) |
810 | 856 | return; |
... | ... | @@ -829,12 +875,6 @@ |
829 | 875 | } |
830 | 876 | mpext->data_len = mp_opt->data_len; |
831 | 877 | mpext->use_map = 1; |
832 | - } | |
833 | - | |
834 | - if (mp_opt->use_ack) { | |
835 | - mpext->data_ack = mp_opt->data_ack; | |
836 | - mpext->use_ack = 1; | |
837 | - mpext->ack64 = mp_opt->ack64; | |
838 | 878 | } |
839 | 879 | |
840 | 880 | mpext->data_fin = mp_opt->data_fin; |
net/mptcp/protocol.c
... | ... | @@ -906,6 +906,7 @@ |
906 | 906 | } |
907 | 907 | |
908 | 908 | msk->write_seq = subflow_req->idsn + 1; |
909 | + atomic64_set(&msk->snd_una, msk->write_seq); | |
909 | 910 | if (subflow_req->remote_key_valid) { |
910 | 911 | msk->can_ack = true; |
911 | 912 | msk->remote_key = subflow_req->remote_key; |
... | ... | @@ -1107,6 +1108,7 @@ |
1107 | 1108 | WRITE_ONCE(msk->write_seq, subflow->idsn + 1); |
1108 | 1109 | WRITE_ONCE(msk->ack_seq, ack_seq); |
1109 | 1110 | WRITE_ONCE(msk->can_ack, 1); |
1111 | + atomic64_set(&msk->snd_una, msk->write_seq); | |
1110 | 1112 | |
1111 | 1113 | mptcp_pm_new_connection(msk, 0); |
1112 | 1114 | } |