Commit 13e9b9972fa0f34059e737ae215a26e43966b46f
Committed by
David S. Miller
1 parent
3fd0202a0d
tipc: make tipc_buf_append() more robust
As per comment from David Miller, we try to make the buffer reassembly function more resilient to user errors than it is today. - We check that the "*buf" parameter always is set, since this is mandatory input. - We ensure that *buf->next always is set to NULL before linking in the buffer, instead of relying on the caller to have done this. - We ensure that the "tail" pointer in the head buffer's control block is initialized to NULL when the first fragment arrives. Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 21 additions and 8 deletions Inline Diff
net/tipc/msg.c
1 | /* | 1 | /* |
2 | * net/tipc/msg.c: TIPC message header routines | 2 | * net/tipc/msg.c: TIPC message header routines |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, 2014, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2014, Ericsson AB |
5 | * Copyright (c) 2005, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions are met: | 9 | * modification, are permitted provided that the following conditions are met: |
10 | * | 10 | * |
11 | * 1. Redistributions of source code must retain the above copyright | 11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | 12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright | 13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the | 14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. | 15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. Neither the names of the copyright holders nor the names of its | 16 | * 3. Neither the names of the copyright holders nor the names of its |
17 | * contributors may be used to endorse or promote products derived from | 17 | * contributors may be used to endorse or promote products derived from |
18 | * this software without specific prior written permission. | 18 | * this software without specific prior written permission. |
19 | * | 19 | * |
20 | * Alternatively, this software may be distributed under the terms of the | 20 | * Alternatively, this software may be distributed under the terms of the |
21 | * GNU General Public License ("GPL") version 2 as published by the Free | 21 | * GNU General Public License ("GPL") version 2 as published by the Free |
22 | * Software Foundation. | 22 | * Software Foundation. |
23 | * | 23 | * |
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | 24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | 27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
34 | * POSSIBILITY OF SUCH DAMAGE. | 34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include "core.h" | 37 | #include "core.h" |
38 | #include "msg.h" | 38 | #include "msg.h" |
39 | #include "addr.h" | 39 | #include "addr.h" |
40 | #include "name_table.h" | 40 | #include "name_table.h" |
41 | 41 | ||
42 | #define MAX_FORWARD_SIZE 1024 | 42 | #define MAX_FORWARD_SIZE 1024 |
43 | 43 | ||
44 | static unsigned int align(unsigned int i) | 44 | static unsigned int align(unsigned int i) |
45 | { | 45 | { |
46 | return (i + 3) & ~3u; | 46 | return (i + 3) & ~3u; |
47 | } | 47 | } |
48 | 48 | ||
49 | void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, | 49 | void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, |
50 | u32 destnode) | 50 | u32 destnode) |
51 | { | 51 | { |
52 | memset(m, 0, hsize); | 52 | memset(m, 0, hsize); |
53 | msg_set_version(m); | 53 | msg_set_version(m); |
54 | msg_set_user(m, user); | 54 | msg_set_user(m, user); |
55 | msg_set_hdr_sz(m, hsize); | 55 | msg_set_hdr_sz(m, hsize); |
56 | msg_set_size(m, hsize); | 56 | msg_set_size(m, hsize); |
57 | msg_set_prevnode(m, tipc_own_addr); | 57 | msg_set_prevnode(m, tipc_own_addr); |
58 | msg_set_type(m, type); | 58 | msg_set_type(m, type); |
59 | msg_set_orignode(m, tipc_own_addr); | 59 | msg_set_orignode(m, tipc_own_addr); |
60 | msg_set_destnode(m, destnode); | 60 | msg_set_destnode(m, destnode); |
61 | } | 61 | } |
62 | 62 | ||
63 | /* tipc_buf_append(): Append a buffer to the fragment list of another buffer | 63 | /* tipc_buf_append(): Append a buffer to the fragment list of another buffer |
64 | * @*headbuf: in: NULL for first frag, otherwise value returned from prev call | 64 | * @*headbuf: in: NULL for first frag, otherwise value returned from prev call |
65 | * out: set when successful non-complete reassembly, otherwise NULL | 65 | * out: set when successful non-complete reassembly, otherwise NULL |
66 | * @*buf: in: the buffer to append. Always defined | 66 | * @*buf: in: the buffer to append. Always defined |
67 | * out: head buf after successful complete reassembly, otherwise NULL | 67 | * out: head buf after successful complete reassembly, otherwise NULL |
68 | * Returns 1 when reassembly complete, otherwise 0 | 68 | * Returns 1 when reassembly complete, otherwise 0 |
69 | */ | 69 | */ |
70 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) | 70 | int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) |
71 | { | 71 | { |
72 | struct sk_buff *head = *headbuf; | 72 | struct sk_buff *head = *headbuf; |
73 | struct sk_buff *frag = *buf; | 73 | struct sk_buff *frag = *buf; |
74 | struct sk_buff *tail; | 74 | struct sk_buff *tail; |
75 | struct tipc_msg *msg = buf_msg(frag); | 75 | struct tipc_msg *msg; |
76 | u32 fragid = msg_type(msg); | 76 | u32 fragid; |
77 | bool headstolen; | ||
78 | int delta; | 77 | int delta; |
78 | bool headstolen; | ||
79 | 79 | ||
80 | if (!frag) | ||
81 | goto err; | ||
82 | |||
83 | msg = buf_msg(frag); | ||
84 | fragid = msg_type(msg); | ||
85 | frag->next = NULL; | ||
80 | skb_pull(frag, msg_hdr_sz(msg)); | 86 | skb_pull(frag, msg_hdr_sz(msg)); |
81 | 87 | ||
82 | if (fragid == FIRST_FRAGMENT) { | 88 | if (fragid == FIRST_FRAGMENT) { |
83 | if (head || skb_unclone(frag, GFP_ATOMIC)) | 89 | if (unlikely(head)) |
84 | goto out_free; | 90 | goto err; |
91 | if (unlikely(skb_unclone(frag, GFP_ATOMIC))) | ||
92 | goto err; | ||
85 | head = *headbuf = frag; | 93 | head = *headbuf = frag; |
86 | skb_frag_list_init(head); | 94 | skb_frag_list_init(head); |
95 | TIPC_SKB_CB(head)->tail = NULL; | ||
87 | *buf = NULL; | 96 | *buf = NULL; |
88 | return 0; | 97 | return 0; |
89 | } | 98 | } |
99 | |||
90 | if (!head) | 100 | if (!head) |
91 | goto out_free; | 101 | goto err; |
92 | tail = TIPC_SKB_CB(head)->tail; | 102 | |
93 | if (skb_try_coalesce(head, frag, &headstolen, &delta)) { | 103 | if (skb_try_coalesce(head, frag, &headstolen, &delta)) { |
94 | kfree_skb_partial(frag, headstolen); | 104 | kfree_skb_partial(frag, headstolen); |
95 | } else { | 105 | } else { |
106 | tail = TIPC_SKB_CB(head)->tail; | ||
96 | if (!skb_has_frag_list(head)) | 107 | if (!skb_has_frag_list(head)) |
97 | skb_shinfo(head)->frag_list = frag; | 108 | skb_shinfo(head)->frag_list = frag; |
98 | else | 109 | else |
99 | tail->next = frag; | 110 | tail->next = frag; |
100 | head->truesize += frag->truesize; | 111 | head->truesize += frag->truesize; |
101 | head->data_len += frag->len; | 112 | head->data_len += frag->len; |
102 | head->len += frag->len; | 113 | head->len += frag->len; |
103 | TIPC_SKB_CB(head)->tail = frag; | 114 | TIPC_SKB_CB(head)->tail = frag; |
104 | } | 115 | } |
116 | |||
105 | if (fragid == LAST_FRAGMENT) { | 117 | if (fragid == LAST_FRAGMENT) { |
106 | *buf = head; | 118 | *buf = head; |
107 | TIPC_SKB_CB(head)->tail = NULL; | 119 | TIPC_SKB_CB(head)->tail = NULL; |
108 | *headbuf = NULL; | 120 | *headbuf = NULL; |
109 | return 1; | 121 | return 1; |
110 | } | 122 | } |
111 | *buf = NULL; | 123 | *buf = NULL; |
112 | return 0; | 124 | return 0; |
113 | out_free: | 125 | |
126 | err: | ||
114 | pr_warn_ratelimited("Unable to build fragment list\n"); | 127 | pr_warn_ratelimited("Unable to build fragment list\n"); |
115 | kfree_skb(*buf); | 128 | kfree_skb(*buf); |
116 | kfree_skb(*headbuf); | 129 | kfree_skb(*headbuf); |
117 | *buf = *headbuf = NULL; | 130 | *buf = *headbuf = NULL; |
118 | return 0; | 131 | return 0; |
119 | } | 132 | } |
120 | 133 | ||
121 | 134 | ||
122 | /** | 135 | /** |
123 | * tipc_msg_build - create buffer chain containing specified header and data | 136 | * tipc_msg_build - create buffer chain containing specified header and data |
124 | * @mhdr: Message header, to be prepended to data | 137 | * @mhdr: Message header, to be prepended to data |
125 | * @iov: User data | 138 | * @iov: User data |
126 | * @offset: Position in iov to start copying from | 139 | * @offset: Position in iov to start copying from |
127 | * @dsz: Total length of user data | 140 | * @dsz: Total length of user data |
128 | * @pktmax: Max packet size that can be used | 141 | * @pktmax: Max packet size that can be used |
129 | * @chain: Buffer or chain of buffers to be returned to caller | 142 | * @chain: Buffer or chain of buffers to be returned to caller |
130 | * Returns message data size or errno: -ENOMEM, -EFAULT | 143 | * Returns message data size or errno: -ENOMEM, -EFAULT |
131 | */ | 144 | */ |
132 | int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, | 145 | int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, |
133 | int offset, int dsz, int pktmax , struct sk_buff **chain) | 146 | int offset, int dsz, int pktmax , struct sk_buff **chain) |
134 | { | 147 | { |
135 | int mhsz = msg_hdr_sz(mhdr); | 148 | int mhsz = msg_hdr_sz(mhdr); |
136 | int msz = mhsz + dsz; | 149 | int msz = mhsz + dsz; |
137 | int pktno = 1; | 150 | int pktno = 1; |
138 | int pktsz; | 151 | int pktsz; |
139 | int pktrem = pktmax; | 152 | int pktrem = pktmax; |
140 | int drem = dsz; | 153 | int drem = dsz; |
141 | struct tipc_msg pkthdr; | 154 | struct tipc_msg pkthdr; |
142 | struct sk_buff *buf, *prev; | 155 | struct sk_buff *buf, *prev; |
143 | char *pktpos; | 156 | char *pktpos; |
144 | int rc; | 157 | int rc; |
145 | 158 | ||
146 | msg_set_size(mhdr, msz); | 159 | msg_set_size(mhdr, msz); |
147 | 160 | ||
148 | /* No fragmentation needed? */ | 161 | /* No fragmentation needed? */ |
149 | if (likely(msz <= pktmax)) { | 162 | if (likely(msz <= pktmax)) { |
150 | buf = tipc_buf_acquire(msz); | 163 | buf = tipc_buf_acquire(msz); |
151 | *chain = buf; | 164 | *chain = buf; |
152 | if (unlikely(!buf)) | 165 | if (unlikely(!buf)) |
153 | return -ENOMEM; | 166 | return -ENOMEM; |
154 | skb_copy_to_linear_data(buf, mhdr, mhsz); | 167 | skb_copy_to_linear_data(buf, mhdr, mhsz); |
155 | pktpos = buf->data + mhsz; | 168 | pktpos = buf->data + mhsz; |
156 | if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz)) | 169 | if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz)) |
157 | return dsz; | 170 | return dsz; |
158 | rc = -EFAULT; | 171 | rc = -EFAULT; |
159 | goto error; | 172 | goto error; |
160 | } | 173 | } |
161 | 174 | ||
162 | /* Prepare reusable fragment header */ | 175 | /* Prepare reusable fragment header */ |
163 | tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, | 176 | tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, |
164 | INT_H_SIZE, msg_destnode(mhdr)); | 177 | INT_H_SIZE, msg_destnode(mhdr)); |
165 | msg_set_size(&pkthdr, pktmax); | 178 | msg_set_size(&pkthdr, pktmax); |
166 | msg_set_fragm_no(&pkthdr, pktno); | 179 | msg_set_fragm_no(&pkthdr, pktno); |
167 | 180 | ||
168 | /* Prepare first fragment */ | 181 | /* Prepare first fragment */ |
169 | *chain = buf = tipc_buf_acquire(pktmax); | 182 | *chain = buf = tipc_buf_acquire(pktmax); |
170 | if (!buf) | 183 | if (!buf) |
171 | return -ENOMEM; | 184 | return -ENOMEM; |
172 | pktpos = buf->data; | 185 | pktpos = buf->data; |
173 | skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); | 186 | skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); |
174 | pktpos += INT_H_SIZE; | 187 | pktpos += INT_H_SIZE; |
175 | pktrem -= INT_H_SIZE; | 188 | pktrem -= INT_H_SIZE; |
176 | skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz); | 189 | skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz); |
177 | pktpos += mhsz; | 190 | pktpos += mhsz; |
178 | pktrem -= mhsz; | 191 | pktrem -= mhsz; |
179 | 192 | ||
180 | do { | 193 | do { |
181 | if (drem < pktrem) | 194 | if (drem < pktrem) |
182 | pktrem = drem; | 195 | pktrem = drem; |
183 | 196 | ||
184 | if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) { | 197 | if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) { |
185 | rc = -EFAULT; | 198 | rc = -EFAULT; |
186 | goto error; | 199 | goto error; |
187 | } | 200 | } |
188 | drem -= pktrem; | 201 | drem -= pktrem; |
189 | offset += pktrem; | 202 | offset += pktrem; |
190 | 203 | ||
191 | if (!drem) | 204 | if (!drem) |
192 | break; | 205 | break; |
193 | 206 | ||
194 | /* Prepare new fragment: */ | 207 | /* Prepare new fragment: */ |
195 | if (drem < (pktmax - INT_H_SIZE)) | 208 | if (drem < (pktmax - INT_H_SIZE)) |
196 | pktsz = drem + INT_H_SIZE; | 209 | pktsz = drem + INT_H_SIZE; |
197 | else | 210 | else |
198 | pktsz = pktmax; | 211 | pktsz = pktmax; |
199 | prev = buf; | 212 | prev = buf; |
200 | buf = tipc_buf_acquire(pktsz); | 213 | buf = tipc_buf_acquire(pktsz); |
201 | if (!buf) { | 214 | if (!buf) { |
202 | rc = -ENOMEM; | 215 | rc = -ENOMEM; |
203 | goto error; | 216 | goto error; |
204 | } | 217 | } |
205 | prev->next = buf; | 218 | prev->next = buf; |
206 | msg_set_type(&pkthdr, FRAGMENT); | 219 | msg_set_type(&pkthdr, FRAGMENT); |
207 | msg_set_size(&pkthdr, pktsz); | 220 | msg_set_size(&pkthdr, pktsz); |
208 | msg_set_fragm_no(&pkthdr, ++pktno); | 221 | msg_set_fragm_no(&pkthdr, ++pktno); |
209 | skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); | 222 | skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); |
210 | pktpos = buf->data + INT_H_SIZE; | 223 | pktpos = buf->data + INT_H_SIZE; |
211 | pktrem = pktsz - INT_H_SIZE; | 224 | pktrem = pktsz - INT_H_SIZE; |
212 | 225 | ||
213 | } while (1); | 226 | } while (1); |
214 | 227 | ||
215 | msg_set_type(buf_msg(buf), LAST_FRAGMENT); | 228 | msg_set_type(buf_msg(buf), LAST_FRAGMENT); |
216 | return dsz; | 229 | return dsz; |
217 | error: | 230 | error: |
218 | kfree_skb_list(*chain); | 231 | kfree_skb_list(*chain); |
219 | *chain = NULL; | 232 | *chain = NULL; |
220 | return rc; | 233 | return rc; |
221 | } | 234 | } |
222 | 235 | ||
223 | /** | 236 | /** |
224 | * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one | 237 | * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one |
225 | * @bbuf: the existing buffer ("bundle") | 238 | * @bbuf: the existing buffer ("bundle") |
226 | * @buf: buffer to be appended | 239 | * @buf: buffer to be appended |
227 | * @mtu: max allowable size for the bundle buffer | 240 | * @mtu: max allowable size for the bundle buffer |
228 | * Consumes buffer if successful | 241 | * Consumes buffer if successful |
229 | * Returns true if bundling could be performed, otherwise false | 242 | * Returns true if bundling could be performed, otherwise false |
230 | */ | 243 | */ |
231 | bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) | 244 | bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) |
232 | { | 245 | { |
233 | struct tipc_msg *bmsg = buf_msg(bbuf); | 246 | struct tipc_msg *bmsg = buf_msg(bbuf); |
234 | struct tipc_msg *msg = buf_msg(buf); | 247 | struct tipc_msg *msg = buf_msg(buf); |
235 | unsigned int bsz = msg_size(bmsg); | 248 | unsigned int bsz = msg_size(bmsg); |
236 | unsigned int msz = msg_size(msg); | 249 | unsigned int msz = msg_size(msg); |
237 | u32 start = align(bsz); | 250 | u32 start = align(bsz); |
238 | u32 max = mtu - INT_H_SIZE; | 251 | u32 max = mtu - INT_H_SIZE; |
239 | u32 pad = start - bsz; | 252 | u32 pad = start - bsz; |
240 | 253 | ||
241 | if (likely(msg_user(msg) == MSG_FRAGMENTER)) | 254 | if (likely(msg_user(msg) == MSG_FRAGMENTER)) |
242 | return false; | 255 | return false; |
243 | if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) | 256 | if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) |
244 | return false; | 257 | return false; |
245 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) | 258 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) |
246 | return false; | 259 | return false; |
247 | if (likely(msg_user(bmsg) != MSG_BUNDLER)) | 260 | if (likely(msg_user(bmsg) != MSG_BUNDLER)) |
248 | return false; | 261 | return false; |
249 | if (likely(msg_type(bmsg) != BUNDLE_OPEN)) | 262 | if (likely(msg_type(bmsg) != BUNDLE_OPEN)) |
250 | return false; | 263 | return false; |
251 | if (unlikely(skb_tailroom(bbuf) < (pad + msz))) | 264 | if (unlikely(skb_tailroom(bbuf) < (pad + msz))) |
252 | return false; | 265 | return false; |
253 | if (unlikely(max < (start + msz))) | 266 | if (unlikely(max < (start + msz))) |
254 | return false; | 267 | return false; |
255 | 268 | ||
256 | skb_put(bbuf, pad + msz); | 269 | skb_put(bbuf, pad + msz); |
257 | skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz); | 270 | skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz); |
258 | msg_set_size(bmsg, start + msz); | 271 | msg_set_size(bmsg, start + msz); |
259 | msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); | 272 | msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); |
260 | bbuf->next = buf->next; | 273 | bbuf->next = buf->next; |
261 | kfree_skb(buf); | 274 | kfree_skb(buf); |
262 | return true; | 275 | return true; |
263 | } | 276 | } |
264 | 277 | ||
265 | /** | 278 | /** |
266 | * tipc_msg_make_bundle(): Create bundle buf and append message to its tail | 279 | * tipc_msg_make_bundle(): Create bundle buf and append message to its tail |
267 | * @buf: buffer to be appended and replaced | 280 | * @buf: buffer to be appended and replaced |
268 | * @mtu: max allowable size for the bundle buffer, inclusive header | 281 | * @mtu: max allowable size for the bundle buffer, inclusive header |
269 | * @dnode: destination node for message. (Not always present in header) | 282 | * @dnode: destination node for message. (Not always present in header) |
270 | * Replaces buffer if successful | 283 | * Replaces buffer if successful |
271 | * Returns true if success, otherwise false | 284 | * Returns true if success, otherwise false |
272 | */ | 285 | */ |
273 | bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) | 286 | bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) |
274 | { | 287 | { |
275 | struct sk_buff *bbuf; | 288 | struct sk_buff *bbuf; |
276 | struct tipc_msg *bmsg; | 289 | struct tipc_msg *bmsg; |
277 | struct tipc_msg *msg = buf_msg(*buf); | 290 | struct tipc_msg *msg = buf_msg(*buf); |
278 | u32 msz = msg_size(msg); | 291 | u32 msz = msg_size(msg); |
279 | u32 max = mtu - INT_H_SIZE; | 292 | u32 max = mtu - INT_H_SIZE; |
280 | 293 | ||
281 | if (msg_user(msg) == MSG_FRAGMENTER) | 294 | if (msg_user(msg) == MSG_FRAGMENTER) |
282 | return false; | 295 | return false; |
283 | if (msg_user(msg) == CHANGEOVER_PROTOCOL) | 296 | if (msg_user(msg) == CHANGEOVER_PROTOCOL) |
284 | return false; | 297 | return false; |
285 | if (msg_user(msg) == BCAST_PROTOCOL) | 298 | if (msg_user(msg) == BCAST_PROTOCOL) |
286 | return false; | 299 | return false; |
287 | if (msz > (max / 2)) | 300 | if (msz > (max / 2)) |
288 | return false; | 301 | return false; |
289 | 302 | ||
290 | bbuf = tipc_buf_acquire(max); | 303 | bbuf = tipc_buf_acquire(max); |
291 | if (!bbuf) | 304 | if (!bbuf) |
292 | return false; | 305 | return false; |
293 | 306 | ||
294 | skb_trim(bbuf, INT_H_SIZE); | 307 | skb_trim(bbuf, INT_H_SIZE); |
295 | bmsg = buf_msg(bbuf); | 308 | bmsg = buf_msg(bbuf); |
296 | tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode); | 309 | tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode); |
297 | msg_set_seqno(bmsg, msg_seqno(msg)); | 310 | msg_set_seqno(bmsg, msg_seqno(msg)); |
298 | msg_set_ack(bmsg, msg_ack(msg)); | 311 | msg_set_ack(bmsg, msg_ack(msg)); |
299 | msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); | 312 | msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); |
300 | bbuf->next = (*buf)->next; | 313 | bbuf->next = (*buf)->next; |
301 | tipc_msg_bundle(bbuf, *buf, mtu); | 314 | tipc_msg_bundle(bbuf, *buf, mtu); |
302 | *buf = bbuf; | 315 | *buf = bbuf; |
303 | return true; | 316 | return true; |
304 | } | 317 | } |
305 | 318 | ||
306 | /** | 319 | /** |
307 | * tipc_msg_reverse(): swap source and destination addresses and add error code | 320 | * tipc_msg_reverse(): swap source and destination addresses and add error code |
308 | * @buf: buffer containing message to be reversed | 321 | * @buf: buffer containing message to be reversed |
309 | * @dnode: return value: node where to send message after reversal | 322 | * @dnode: return value: node where to send message after reversal |
310 | * @err: error code to be set in message | 323 | * @err: error code to be set in message |
311 | * Consumes buffer if failure | 324 | * Consumes buffer if failure |
312 | * Returns true if success, otherwise false | 325 | * Returns true if success, otherwise false |
313 | */ | 326 | */ |
314 | bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err) | 327 | bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err) |
315 | { | 328 | { |
316 | struct tipc_msg *msg = buf_msg(buf); | 329 | struct tipc_msg *msg = buf_msg(buf); |
317 | uint imp = msg_importance(msg); | 330 | uint imp = msg_importance(msg); |
318 | struct tipc_msg ohdr; | 331 | struct tipc_msg ohdr; |
319 | uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); | 332 | uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); |
320 | 333 | ||
321 | if (skb_linearize(buf)) | 334 | if (skb_linearize(buf)) |
322 | goto exit; | 335 | goto exit; |
323 | if (msg_dest_droppable(msg)) | 336 | if (msg_dest_droppable(msg)) |
324 | goto exit; | 337 | goto exit; |
325 | if (msg_errcode(msg)) | 338 | if (msg_errcode(msg)) |
326 | goto exit; | 339 | goto exit; |
327 | 340 | ||
328 | memcpy(&ohdr, msg, msg_hdr_sz(msg)); | 341 | memcpy(&ohdr, msg, msg_hdr_sz(msg)); |
329 | imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE); | 342 | imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE); |
330 | if (msg_isdata(msg)) | 343 | if (msg_isdata(msg)) |
331 | msg_set_importance(msg, imp); | 344 | msg_set_importance(msg, imp); |
332 | msg_set_errcode(msg, err); | 345 | msg_set_errcode(msg, err); |
333 | msg_set_origport(msg, msg_destport(&ohdr)); | 346 | msg_set_origport(msg, msg_destport(&ohdr)); |
334 | msg_set_destport(msg, msg_origport(&ohdr)); | 347 | msg_set_destport(msg, msg_origport(&ohdr)); |
335 | msg_set_prevnode(msg, tipc_own_addr); | 348 | msg_set_prevnode(msg, tipc_own_addr); |
336 | if (!msg_short(msg)) { | 349 | if (!msg_short(msg)) { |
337 | msg_set_orignode(msg, msg_destnode(&ohdr)); | 350 | msg_set_orignode(msg, msg_destnode(&ohdr)); |
338 | msg_set_destnode(msg, msg_orignode(&ohdr)); | 351 | msg_set_destnode(msg, msg_orignode(&ohdr)); |
339 | } | 352 | } |
340 | msg_set_size(msg, msg_hdr_sz(msg) + rdsz); | 353 | msg_set_size(msg, msg_hdr_sz(msg) + rdsz); |
341 | skb_trim(buf, msg_size(msg)); | 354 | skb_trim(buf, msg_size(msg)); |
342 | skb_orphan(buf); | 355 | skb_orphan(buf); |
343 | *dnode = msg_orignode(&ohdr); | 356 | *dnode = msg_orignode(&ohdr); |
344 | return true; | 357 | return true; |
345 | exit: | 358 | exit: |
346 | kfree_skb(buf); | 359 | kfree_skb(buf); |
347 | return false; | 360 | return false; |
348 | } | 361 | } |
349 | 362 | ||
350 | /** | 363 | /** |
351 | * tipc_msg_eval: determine fate of message that found no destination | 364 | * tipc_msg_eval: determine fate of message that found no destination |
352 | * @buf: the buffer containing the message. | 365 | * @buf: the buffer containing the message. |
353 | * @dnode: return value: next-hop node, if message to be forwarded | 366 | * @dnode: return value: next-hop node, if message to be forwarded |
354 | * @err: error code to use, if message to be rejected | 367 | * @err: error code to use, if message to be rejected |
355 | * | 368 | * |
356 | * Does not consume buffer | 369 | * Does not consume buffer |
357 | * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error | 370 | * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error |
358 | * code if message to be rejected | 371 | * code if message to be rejected |
359 | */ | 372 | */ |
360 | int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) | 373 | int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) |
361 | { | 374 | { |
362 | struct tipc_msg *msg = buf_msg(buf); | 375 | struct tipc_msg *msg = buf_msg(buf); |
363 | u32 dport; | 376 | u32 dport; |
364 | 377 | ||
365 | if (msg_type(msg) != TIPC_NAMED_MSG) | 378 | if (msg_type(msg) != TIPC_NAMED_MSG) |
366 | return -TIPC_ERR_NO_PORT; | 379 | return -TIPC_ERR_NO_PORT; |
367 | if (skb_linearize(buf)) | 380 | if (skb_linearize(buf)) |
368 | return -TIPC_ERR_NO_NAME; | 381 | return -TIPC_ERR_NO_NAME; |
369 | if (msg_data_sz(msg) > MAX_FORWARD_SIZE) | 382 | if (msg_data_sz(msg) > MAX_FORWARD_SIZE) |
370 | return -TIPC_ERR_NO_NAME; | 383 | return -TIPC_ERR_NO_NAME; |
371 | if (msg_reroute_cnt(msg) > 0) | 384 | if (msg_reroute_cnt(msg) > 0) |
372 | return -TIPC_ERR_NO_NAME; | 385 | return -TIPC_ERR_NO_NAME; |
373 | 386 | ||
374 | *dnode = addr_domain(msg_lookup_scope(msg)); | 387 | *dnode = addr_domain(msg_lookup_scope(msg)); |
375 | dport = tipc_nametbl_translate(msg_nametype(msg), | 388 | dport = tipc_nametbl_translate(msg_nametype(msg), |
376 | msg_nameinst(msg), | 389 | msg_nameinst(msg), |
377 | dnode); | 390 | dnode); |
378 | if (!dport) | 391 | if (!dport) |
379 | return -TIPC_ERR_NO_NAME; | 392 | return -TIPC_ERR_NO_NAME; |
380 | msg_incr_reroute_cnt(msg); | 393 | msg_incr_reroute_cnt(msg); |
381 | msg_set_destnode(msg, *dnode); | 394 | msg_set_destnode(msg, *dnode); |
382 | msg_set_destport(msg, dport); | 395 | msg_set_destport(msg, dport); |
383 | return TIPC_OK; | 396 | return TIPC_OK; |
384 | } | 397 | } |
385 | 398 | ||
386 | /* tipc_msg_reassemble() - clone a buffer chain of fragments and | 399 | /* tipc_msg_reassemble() - clone a buffer chain of fragments and |
387 | * reassemble the clones into one message | 400 | * reassemble the clones into one message |
388 | */ | 401 | */ |
389 | struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) | 402 | struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) |
390 | { | 403 | { |
391 | struct sk_buff *buf = chain; | 404 | struct sk_buff *buf = chain; |
392 | struct sk_buff *frag = buf; | 405 | struct sk_buff *frag = buf; |
393 | struct sk_buff *head = NULL; | 406 | struct sk_buff *head = NULL; |
394 | int hdr_sz; | 407 | int hdr_sz; |
395 | 408 | ||
396 | /* Copy header if single buffer */ | 409 | /* Copy header if single buffer */ |
397 | if (!buf->next) { | 410 | if (!buf->next) { |
398 | hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf)); | 411 | hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf)); |
399 | return __pskb_copy(buf, hdr_sz, GFP_ATOMIC); | 412 | return __pskb_copy(buf, hdr_sz, GFP_ATOMIC); |
400 | } | 413 | } |
401 | 414 | ||
402 | /* Clone all fragments and reassemble */ | 415 | /* Clone all fragments and reassemble */ |
403 | while (buf) { | 416 | while (buf) { |
404 | frag = skb_clone(buf, GFP_ATOMIC); | 417 | frag = skb_clone(buf, GFP_ATOMIC); |
405 | if (!frag) | 418 | if (!frag) |
406 | goto error; | 419 | goto error; |
407 | frag->next = NULL; | 420 | frag->next = NULL; |
408 | if (tipc_buf_append(&head, &frag)) | 421 | if (tipc_buf_append(&head, &frag)) |
409 | break; | 422 | break; |
410 | if (!head) | 423 | if (!head) |
411 | goto error; | 424 | goto error; |
412 | buf = buf->next; | 425 | buf = buf->next; |
413 | } | 426 | } |
414 | return frag; | 427 | return frag; |
415 | error: | 428 | error: |
416 | pr_warn("Failed do clone local mcast rcv buffer\n"); | 429 | pr_warn("Failed do clone local mcast rcv buffer\n"); |
417 | kfree_skb(head); | 430 | kfree_skb(head); |
418 | return NULL; | 431 | return NULL; |
419 | } | 432 | } |