Commit f6b9664f8b711cf4fd53e70aa0d21f72d5bf806c
Committed by
David S. Miller
1 parent
1c32c5ad6f
Exists in
master
and in
39 other branches
udp: Switch to ip_finish_skb
This patch converts UDP to use the new ip_finish_skb API. This would then allow us to more easily use ip_make_skb, which allows UDP to run without a socket lock. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Acked-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 3 changed files with 73 additions and 33 deletions Side-by-side Diff
include/net/udp.h
... | ... | @@ -144,6 +144,17 @@ |
144 | 144 | return csum; |
145 | 145 | } |
146 | 146 | |
147 | +static inline __wsum udp_csum(struct sk_buff *skb) | |
148 | +{ | |
149 | + __wsum csum = csum_partial(skb_transport_header(skb), | |
150 | + sizeof(struct udphdr), skb->csum); | |
151 | + | |
152 | + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { | |
153 | + csum = csum_add(csum, skb->csum); | |
154 | + } | |
155 | + return csum; | |
156 | +} | |
157 | + | |
147 | 158 | /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ |
148 | 159 | static inline void udp_lib_hash(struct sock *sk) |
149 | 160 | { |
include/net/udplite.h
... | ... | @@ -115,6 +115,18 @@ |
115 | 115 | return csum; |
116 | 116 | } |
117 | 117 | |
118 | +static inline __wsum udplite_csum(struct sk_buff *skb) | |
119 | +{ | |
120 | + struct sock *sk = skb->sk; | |
121 | + int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); | |
122 | + const int off = skb_transport_offset(skb); | |
123 | + const int len = skb->len - off; | |
124 | + | |
125 | + skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ | |
126 | + | |
127 | + return skb_checksum(skb, off, min(cscov, len), 0); | |
128 | +} | |
129 | + | |
118 | 130 | extern void udplite4_register(void); |
119 | 131 | extern int udplite_get_port(struct sock *sk, unsigned short snum, |
120 | 132 | int (*scmp)(const struct sock *, const struct sock *)); |
net/ipv4/udp.c
... | ... | @@ -663,75 +663,72 @@ |
663 | 663 | EXPORT_SYMBOL(udp_flush_pending_frames); |
664 | 664 | |
665 | 665 | /** |
666 | - * udp4_hwcsum_outgoing - handle outgoing HW checksumming | |
667 | - * @sk: socket we are sending on | |
666 | + * udp4_hwcsum - handle outgoing HW checksumming | |
668 | 667 | * @skb: sk_buff containing the filled-in UDP header |
669 | 668 | * (checksum field must be zeroed out) |
669 | + * @src: source IP address | |
670 | + * @dst: destination IP address | |
670 | 671 | */ |
671 | -static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, | |
672 | - __be32 src, __be32 dst, int len) | |
672 | +static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) | |
673 | 673 | { |
674 | - unsigned int offset; | |
675 | 674 | struct udphdr *uh = udp_hdr(skb); |
675 | + struct sk_buff *frags = skb_shinfo(skb)->frag_list; | |
676 | + int offset = skb_transport_offset(skb); | |
677 | + int len = skb->len - offset; | |
678 | + int hlen = len; | |
676 | 679 | __wsum csum = 0; |
677 | 680 | |
678 | - if (skb_queue_len(&sk->sk_write_queue) == 1) { | |
681 | + if (!frags) { | |
679 | 682 | /* |
680 | 683 | * Only one fragment on the socket. |
681 | 684 | */ |
682 | 685 | skb->csum_start = skb_transport_header(skb) - skb->head; |
683 | 686 | skb->csum_offset = offsetof(struct udphdr, check); |
684 | - uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); | |
687 | + uh->check = ~csum_tcpudp_magic(src, dst, len, | |
688 | + IPPROTO_UDP, 0); | |
685 | 689 | } else { |
686 | 690 | /* |
687 | 691 | * HW-checksum won't work as there are two or more |
688 | 692 | * fragments on the socket so that all csums of sk_buffs |
689 | 693 | * should be together |
690 | 694 | */ |
691 | - offset = skb_transport_offset(skb); | |
692 | - skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | |
695 | + do { | |
696 | + csum = csum_add(csum, frags->csum); | |
697 | + hlen -= frags->len; | |
698 | + } while ((frags = frags->next)); | |
693 | 699 | |
700 | + csum = skb_checksum(skb, offset, hlen, csum); | |
694 | 701 | skb->ip_summed = CHECKSUM_NONE; |
695 | 702 | |
696 | - skb_queue_walk(&sk->sk_write_queue, skb) { | |
697 | - csum = csum_add(csum, skb->csum); | |
698 | - } | |
699 | - | |
700 | 703 | uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); |
701 | 704 | if (uh->check == 0) |
702 | 705 | uh->check = CSUM_MANGLED_0; |
703 | 706 | } |
704 | 707 | } |
705 | 708 | |
706 | -/* | |
707 | - * Push out all pending data as one UDP datagram. Socket is locked. | |
708 | - */ | |
709 | -static int udp_push_pending_frames(struct sock *sk) | |
709 | +static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport) | |
710 | 710 | { |
711 | - struct udp_sock *up = udp_sk(sk); | |
711 | + struct sock *sk = skb->sk; | |
712 | 712 | struct inet_sock *inet = inet_sk(sk); |
713 | - struct flowi *fl = &inet->cork.fl; | |
714 | - struct sk_buff *skb; | |
715 | 713 | struct udphdr *uh; |
714 | + struct rtable *rt = (struct rtable *)skb_dst(skb); | |
716 | 715 | int err = 0; |
717 | 716 | int is_udplite = IS_UDPLITE(sk); |
717 | + int offset = skb_transport_offset(skb); | |
718 | + int len = skb->len - offset; | |
718 | 719 | __wsum csum = 0; |
719 | 720 | |
720 | - /* Grab the skbuff where UDP header space exists. */ | |
721 | - if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) | |
722 | - goto out; | |
723 | - | |
724 | 721 | /* |
725 | 722 | * Create a UDP header |
726 | 723 | */ |
727 | 724 | uh = udp_hdr(skb); |
728 | - uh->source = fl->fl_ip_sport; | |
729 | - uh->dest = fl->fl_ip_dport; | |
730 | - uh->len = htons(up->len); | |
725 | + uh->source = inet->inet_sport; | |
726 | + uh->dest = dport; | |
727 | + uh->len = htons(len); | |
731 | 728 | uh->check = 0; |
732 | 729 | |
733 | 730 | if (is_udplite) /* UDP-Lite */ |
734 | - csum = udplite_csum_outgoing(sk, skb); | |
731 | + csum = udplite_csum(skb); | |
735 | 732 | |
736 | 733 | else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ |
737 | 734 | |
738 | 735 | |
739 | 736 | |
740 | 737 | |
... | ... | @@ -740,20 +737,20 @@ |
740 | 737 | |
741 | 738 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ |
742 | 739 | |
743 | - udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len); | |
740 | + udp4_hwcsum(skb, rt->rt_src, daddr); | |
744 | 741 | goto send; |
745 | 742 | |
746 | - } else /* `normal' UDP */ | |
747 | - csum = udp_csum_outgoing(sk, skb); | |
743 | + } else | |
744 | + csum = udp_csum(skb); | |
748 | 745 | |
749 | 746 | /* add protocol-dependent pseudo-header */ |
750 | - uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, | |
747 | + uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len, | |
751 | 748 | sk->sk_protocol, csum); |
752 | 749 | if (uh->check == 0) |
753 | 750 | uh->check = CSUM_MANGLED_0; |
754 | 751 | |
755 | 752 | send: |
756 | - err = ip_push_pending_frames(sk); | |
753 | + err = ip_send_skb(skb); | |
757 | 754 | if (err) { |
758 | 755 | if (err == -ENOBUFS && !inet->recverr) { |
759 | 756 | UDP_INC_STATS_USER(sock_net(sk), |
... | ... | @@ -763,6 +760,26 @@ |
763 | 760 | } else |
764 | 761 | UDP_INC_STATS_USER(sock_net(sk), |
765 | 762 | UDP_MIB_OUTDATAGRAMS, is_udplite); |
763 | + return err; | |
764 | +} | |
765 | + | |
766 | +/* | |
767 | + * Push out all pending data as one UDP datagram. Socket is locked. | |
768 | + */ | |
769 | +static int udp_push_pending_frames(struct sock *sk) | |
770 | +{ | |
771 | + struct udp_sock *up = udp_sk(sk); | |
772 | + struct inet_sock *inet = inet_sk(sk); | |
773 | + struct flowi *fl = &inet->cork.fl; | |
774 | + struct sk_buff *skb; | |
775 | + int err = 0; | |
776 | + | |
777 | + skb = ip_finish_skb(sk); | |
778 | + if (!skb) | |
779 | + goto out; | |
780 | + | |
781 | + err = udp_send_skb(skb, fl->fl4_dst, fl->fl_ip_dport); | |
782 | + | |
766 | 783 | out: |
767 | 784 | up->len = 0; |
768 | 785 | up->pending = 0; |