|
20 | 20 |
|
21 | 21 | #include "xfrm_inout.h"
|
22 | 22 |
|
| 23 | +/* IPTFS encap (header) values. */ |
| 24 | +#define IPTFS_SUBTYPE_BASIC 0 |
| 25 | +#define IPTFS_SUBTYPE_CC 1 |
| 26 | + |
23 | 27 | /* ------------------------------------------------ */
|
24 | 28 | /* IPTFS default SA values (tunnel ingress/dir-out) */
|
25 | 29 | /* ------------------------------------------------ */
|
@@ -185,6 +189,277 @@ static void iptfs_skb_head_to_frag(const struct sk_buff *skb, skb_frag_t *frag)
|
185 | 189 | skb_frag_fill_page_desc(frag, page, skb->data - addr, skb_headlen(skb));
|
186 | 190 | }
|
187 | 191 |
|
| 192 | +/* ================================== */ |
| 193 | +/* IPTFS Receiving (egress) Functions */ |
| 194 | +/* ================================== */ |
| 195 | + |
/**
 * iptfs_pskb_extract_seq() - Create and load data into a new sk_buff.
 * @skblen: the total data size for `skb`.
 * @st: The source for the rest of the data to copy into `skb`.
 * @off: The offset into @st to copy data from.
 * @len: The length of data to copy from @st into `skb`. This must be <=
 *       @skblen.
 *
 * Create a new sk_buff `skb` with @skblen of packet data space. Then using
 * seq functions copy @len bytes from @st into `skb` starting from @off.
 *
 * It is an error for @len to be greater than the amount of data left in @st.
 *
 * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
 */
static struct sk_buff *
iptfs_pskb_extract_seq(u32 skblen, struct skb_seq_state *st, u32 off, int len)
{
	struct sk_buff *skb = iptfs_alloc_skb(st->root_skb, skblen, false);

	if (!skb)
		return NULL;
	/* On a failed seq read the new skb owns no data from @st; free it. */
	if (skb_copy_seq_read(st, off, skb_put(skb, len), len)) {
		XFRM_INC_STATS(dev_net(st->root_skb->dev), LINUX_MIB_XFRMINERROR);
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}
| 226 | + |
| 227 | +/** |
| 228 | + * iptfs_complete_inner_skb() - finish preparing the inner packet for gro recv. |
| 229 | + * @x: xfrm state |
| 230 | + * @skb: the inner packet |
| 231 | + * |
| 232 | + * Finish the standard xfrm processing on the inner packet prior to sending back |
| 233 | + * through gro_cells_receive. We do this separately b/c we are building a list |
| 234 | + * of packets in the hopes that one day a list will be taken by |
| 235 | + * xfrm_input. |
| 236 | + */ |
| 237 | +static void iptfs_complete_inner_skb(struct xfrm_state *x, struct sk_buff *skb) |
| 238 | +{ |
| 239 | + skb_reset_network_header(skb); |
| 240 | + |
| 241 | + /* The packet is going back through gro_cells_receive no need to |
| 242 | + * set this. |
| 243 | + */ |
| 244 | + skb_reset_transport_header(skb); |
| 245 | + |
| 246 | + /* Packet already has checksum value set. */ |
| 247 | + skb->ip_summed = CHECKSUM_NONE; |
| 248 | + |
| 249 | + /* Our skb will contain the header data copied when this outer packet |
| 250 | + * which contained the start of this inner packet. This is true |
| 251 | + * when we allocate a new skb as well as when we reuse the existing skb. |
| 252 | + */ |
| 253 | + if (ip_hdr(skb)->version == 0x4) { |
| 254 | + struct iphdr *iph = ip_hdr(skb); |
| 255 | + |
| 256 | + if (x->props.flags & XFRM_STATE_DECAP_DSCP) |
| 257 | + ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph); |
| 258 | + if (!(x->props.flags & XFRM_STATE_NOECN)) |
| 259 | + if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) |
| 260 | + IP_ECN_set_ce(iph); |
| 261 | + |
| 262 | + skb->protocol = htons(ETH_P_IP); |
| 263 | + } else { |
| 264 | + struct ipv6hdr *iph = ipv6_hdr(skb); |
| 265 | + |
| 266 | + if (x->props.flags & XFRM_STATE_DECAP_DSCP) |
| 267 | + ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph); |
| 268 | + if (!(x->props.flags & XFRM_STATE_NOECN)) |
| 269 | + if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) |
| 270 | + IP6_ECN_set_ce(skb, iph); |
| 271 | + |
| 272 | + skb->protocol = htons(ETH_P_IPV6); |
| 273 | + } |
| 274 | +} |
| 275 | + |
/**
 * __input_process_payload() - extract inner packets from an IPTFS payload.
 * @x: xfrm state
 * @data: offset into the root skb where the inner-packet data begins
 * @skbseq: prepared seq-read state over the outer (root) skb
 * @sublist: initialized list head used to batch the extracted inner packets
 *
 * Walk the payload from @data to the end of the outer skb, carving out each
 * complete inner IPv4/IPv6 packet into its own skb and dispatching the batch
 * through xfrm_input(). A zero version nibble marks trailing pad; a truncated
 * header or over-long packet ends the walk (fragment handling is added in
 * later commits).
 *
 * Return: true if the root skb was consumed (reused), false otherwise. In
 * this (non-fragment) version it always returns false.
 */
static bool __input_process_payload(struct xfrm_state *x, u32 data,
				    struct skb_seq_state *skbseq,
				    struct list_head *sublist)
{
	u8 hbytes[sizeof(struct ipv6hdr)];
	struct sk_buff *first_skb, *next, *skb;
	const unsigned char *old_mac;
	struct iphdr *iph;
	struct net *net;
	u32 remaining, iplen, iphlen, tail;

	net = xs_net(x);
	skb = skbseq->root_skb;
	first_skb = NULL;

	/* Save the old mac header if set */
	old_mac = skb_mac_header_was_set(skb) ? skb_mac_header(skb) : NULL;

	/* New packets */

	tail = skb->len;
	while (data < tail) {
		__be16 protocol = 0;

		/* Gather information on the next data block.
		 * `data` points to the start of the data block.
		 */
		remaining = tail - data;

		/* try and copy enough bytes to read length from ipv4/ipv6 */
		iphlen = min_t(u32, remaining, 6);
		if (skb_copy_seq_read(skbseq, data, hbytes, iphlen)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto done;
		}

		/* Only the version nibble (byte 0) is trusted until the
		 * per-family length checks below pass.
		 */
		iph = (struct iphdr *)hbytes;
		if (iph->version == 0x4) {
			/* must have at least tot_len field present */
			if (remaining < 4)
				break;

			iplen = be16_to_cpu(iph->tot_len);
			iphlen = iph->ihl << 2;
			protocol = cpu_to_be16(ETH_P_IP);
			XFRM_MODE_SKB_CB(skbseq->root_skb)->tos = iph->tos;
		} else if (iph->version == 0x6) {
			/* must have at least payload_len field present */
			if (remaining < 6)
				break;

			/* payload_len excludes the fixed v6 header; add it
			 * back to get the full inner packet length.
			 */
			iplen = be16_to_cpu(((struct ipv6hdr *)hbytes)->payload_len);
			iplen += sizeof(struct ipv6hdr);
			iphlen = sizeof(struct ipv6hdr);
			protocol = cpu_to_be16(ETH_P_IPV6);
			XFRM_MODE_SKB_CB(skbseq->root_skb)->tos =
				ipv6_get_dsfield((struct ipv6hdr *)iph);
		} else if (iph->version == 0x0) {
			/* pad */
			break;
		} else {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto done;
		}

		if (unlikely(skbseq->stepped_offset)) {
			/* We need to reset our seq read, it can't backup at
			 * this point.
			 */
			struct sk_buff *save = skbseq->root_skb;

			skb_abort_seq_read(skbseq);
			skb_prepare_seq_read(save, data, tail, skbseq);
		}

		if (!first_skb)
			first_skb = skb;

		/* Fragment handling in following commits */
		if (iplen > remaining)
			break;

		skb = iptfs_pskb_extract_seq(iplen, skbseq, data, iplen);
		if (!skb) {
			/* skip to next packet or done */
			data += iplen;
			continue;
		}

		skb->protocol = protocol;
		if (old_mac) {
			/* rebuild the mac header */
			skb_set_mac_header(skb, -first_skb->mac_len);
			memcpy(skb_mac_header(skb), old_mac, first_skb->mac_len);
			eth_hdr(skb)->h_proto = skb->protocol;
		}

		data += iplen;
		iptfs_complete_inner_skb(x, skb);
		list_add_tail(&skb->list, sublist);
	}

	/* Send the packets! */
	list_for_each_entry_safe(skb, next, sublist, list) {
		skb_list_del_init(skb);
		if (xfrm_input(skb, 0, 0, -2))
			kfree_skb(skb);
	}

done:
	return false;
}
| 388 | + |
/**
 * iptfs_input() - handle receipt of iptfs payload
 * @x: xfrm state
 * @skb: the packet
 *
 * Process the IPTFS payload in `skb` and consume it afterwards.
 *
 * Validates the IPTFS (basic or CC) header, then hands the inner-packet data
 * off to __input_process_payload(). The outer skb is freed here unless the
 * payload processing reused (consumed) it.
 *
 * Return: -EINPROGRESS always, so xfrm_input stops processing the skb.
 */
static int iptfs_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_iptfs_cc_hdr iptcch;
	struct skb_seq_state skbseq;
	struct list_head sublist; /* rename this it's just a list */
	struct ip_iptfs_hdr *ipth;
	struct net *net;
	u32 remaining, data;
	bool consumed = false;

	net = xs_net(x);

	/* Large enough to hold both types of header */
	ipth = (struct ip_iptfs_hdr *)&iptcch;

	skb_prepare_seq_read(skb, 0, skb->len, &skbseq);

	/* Get the IPTFS header and validate it */

	if (skb_copy_seq_read(&skbseq, 0, ipth, sizeof(*ipth))) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
		goto done;
	}
	data = sizeof(*ipth);

	/* Set data past the basic header */
	if (ipth->subtype == IPTFS_SUBTYPE_CC) {
		/* Copy the rest of the CC header */
		remaining = sizeof(iptcch) - sizeof(*ipth);
		if (skb_copy_seq_read(&skbseq, data, ipth + 1, remaining)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto done;
		}
		data += remaining;
	} else if (ipth->subtype != IPTFS_SUBTYPE_BASIC) {
		/* Unknown subtype: header error, not a buffer error. */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto done;
	}

	/* No header flags are defined/supported yet. */
	if (ipth->flags != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto done;
	}

	INIT_LIST_HEAD(&sublist);

	/* Fragment handling in following commits */
	data += ntohs(ipth->block_offset);

	/* New packets */
	consumed = __input_process_payload(x, data, &skbseq, &sublist);
done:
	skb = skbseq.root_skb;
	skb_abort_seq_read(&skbseq);

	if (!consumed)
		kfree_skb(skb);

	/* We always have dealt with the input SKB, either we are re-using it,
	 * or we have freed it. Return EINPROGRESS so that xfrm_input stops
	 * processing it.
	 */
	return -EINPROGRESS;
}
| 462 | + |
188 | 463 | /* ================================= */
|
189 | 464 | /* IPTFS Sending (ingress) Functions */
|
190 | 465 | /* ================================= */
|
@@ -1101,6 +1376,7 @@ static const struct xfrm_mode_cbs iptfs_mode_cbs = {
|
1101 | 1376 | .copy_to_user = iptfs_copy_to_user,
|
1102 | 1377 | .sa_len = iptfs_sa_len,
|
1103 | 1378 | .get_inner_mtu = iptfs_get_inner_mtu,
|
| 1379 | + .input = iptfs_input, |
1104 | 1380 | .output = iptfs_output_collect,
|
1105 | 1381 | .prepare_output = iptfs_prepare_output,
|
1106 | 1382 | };
|
|
0 commit comments