/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <vlib/vlib.h>
19 #include <gtpu/gtpu.h>
21 extern vlib_node_registration_t gtpu4_input_node;
22 extern vlib_node_registration_t gtpu6_input_node;
/* Trace formatter for the gtpu input node: renders a gtpu_rx_trace_t
 * (captured per-packet at decap time) into a human-readable string for
 * "show trace".
 * NOTE(review): this dump is garbled — original line numbers are fused
 * into the text and structural lines (braces, break/return statements)
 * are missing. All original tokens are kept verbatim; only comments
 * were added. */
33 static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
35 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37 gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);
/* tunnel_index == ~0 marks "no tunnel matched" (set by the input node) */
39 if (t->tunnel_index != ~0)
41 s = format (s, "GTPU decap from gtpu_tunnel%d ", t->tunnel_index);
/* forwarding_type records why a non-standard packet was re-forwarded
 * instead of decapsulated */
42 switch (t->forwarding_type)
44 case GTPU_FORWARD_BAD_HEADER:
45 s = format (s, "forwarding bad-header ");
47 case GTPU_FORWARD_UNKNOWN_TEID:
48 s = format (s, "forwarding unknown-teid ");
50 case GTPU_FORWARD_UNKNOWN_TYPE:
51 s = format (s, "forwarding unknown-type ");
54 s = format (s, "teid %u, ", t->teid);
/* else branch: lookup failed, report the offending TEID */
58 s = format (s, "GTPU decap error - tunnel for teid %u does not exist, ",
/* common tail: graph next index, error code, raw GTP header fields */
61 s = format (s, "next %d error %d, ", t->next_index, t->error);
62 s = format (s, "flags: 0x%x, type: %d, length: %d", t->header.ver_flags,
63 t->header.type, t->header.length);
68 validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
70 return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
73 // Gets run with every input
/*
 * gtpu_input: shared IPv4/IPv6 GTP-U decap worker, called from the
 * gtpu4/gtpu6 input node functions with is_ip4 selecting the family.
 * Walks the input frame (dual-packet fast loop, then a single-packet
 * loop), looks up the tunnel by (source address, TEID), strips the
 * GTP-U header and enqueues the inner packet to the tunnel's
 * decap-next; bad-header / unknown-teid / unknown-type packets are
 * optionally re-forwarded through configured fallback tunnels.
 * Per-tunnel RX counters are batched per sw_if_index.
 *
 * NOTE(review): this dump is garbled — original line numbers are fused
 * into the text, HTML-entity damage turned "&gtpu..." into ">pu..."
 * (see the gtm init and the ext0/ext1 assignments), and many structural
 * lines (braces, goto labels such as forward/trace/next, and several
 * declarations like bi0/bi1, next0/next1, error0/error1) were lost.
 * All surviving tokens are kept byte-identical; only comments added.
 */
75 gtpu_input (vlib_main_t * vm,
76 vlib_node_runtime_t * node,
77 vlib_frame_t * from_frame,
80 u32 n_left_from, next_index, * from, * to_next;
/* NOTE(review): ">pu_main" is entity-mangled "&gtpu_main" — verify
 * against the upstream source */
81 gtpu_main_t * gtm = >pu_main;
82 vnet_main_t * vnm = gtm->vnet_main;
83 vnet_interface_main_t * im = &vnm->interface_main;
/* one-entry lookup cache: last matched key -> tunnel index */
84 u32 last_tunnel_index = ~0;
85 gtpu4_tunnel_key_t last_key4;
86 gtpu6_tunnel_key_t last_key6;
87 u32 pkts_decapsulated = 0;
88 u32 thread_index = vlib_get_thread_index();
89 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
/* invalidate the lookup caches (~0 / all-ones never matches a key) */
92 last_key4.as_u64 = ~0;
94 clib_memset (&last_key6, 0xff, sizeof (last_key6));
96 // Where is the framevector coming from
97 from = vlib_frame_vector_args (from_frame);
98 // number of packets left in frame
99 n_left_from = from_frame->n_vectors;
101 // whats the next node it needs to go to
102 next_index = node->cached_next_index;
103 // stats from the next interface
104 stats_sw_if_index = node->runtime_data[0];
105 // number of packets processed
106 stats_n_packets = stats_n_bytes = 0;
108 // run until no more packets left in vectorframe
109 while (n_left_from > 0)
113 // get vectorframe to process
114 vlib_get_next_frame (vm, node, next_index,
115 to_next, n_left_to_next);
116 // while there are still more than 4 packets left in frame and more than
117 // two packets in current frame
118 while (n_left_from >= 4 && n_left_to_next >= 2)
120 // buffer index for loading packet data
122 // vlib packet buffer
123 vlib_buffer_t * b0, * b1;
124 // next operation to do with the packet
127 ip4_header_t *ip4_0, *ip4_1;
128 ip6_header_t *ip6_0, *ip6_1;
129 gtpu_header_t *gtpu0, *gtpu1;
130 i32 gtpu_hdr_len0, gtpu_hdr_len1;
132 u32 tunnel_index0, tunnel_index1;
133 gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
134 gtpu4_tunnel_key_t key4_0, key4_1;
135 gtpu6_tunnel_key_t key6_0, key6_1;
137 u32 sw_if_index0, sw_if_index1, len0, len1;
138 u8 has_space0, has_space1;
140 udp_header_t *udp0, *udp1;
141 ip_csum_t sum0, sum1;
/* zero-valued extension header used when the E bit is not set */
143 gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 };
144 gtpu_ext_header_t *ext0, *ext1;
145 bool is_fast_track0, is_fast_track1;
148 /* Prefetch next iteration. */
150 vlib_buffer_t * p2, * p3;
153 p2 = vlib_get_buffer (vm, from[2]);
154 p3 = vlib_get_buffer (vm, from[3]);
156 vlib_prefetch_buffer_header (p2, LOAD);
157 vlib_prefetch_buffer_header (p3, LOAD);
159 CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
160 CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
163 // getting buffer index from vectorframe
166 // pre inserting the packets for the next node
169 // forward in vectorframe
173 // decimate message counter for next node
175 // decimate message counter for current progessing node
178 // load packets into buffer
179 b0 = vlib_get_buffer (vm, bi0);
180 b1 = vlib_get_buffer (vm, bi1);
182 /* udp leaves current_data pointing at the gtpu header */
183 // get pointers to the beginnings of the gtpu frame
184 gtpu0 = vlib_buffer_get_current (b0);
185 gtpu1 = vlib_buffer_get_current (b1);
/* reconstruct outer IP/UDP header pointers by stepping back from the
 * GTP-U header (the udp-local node already advanced past them) */
188 ip4_0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t) -
189 sizeof (ip4_header_t));
190 ip4_1 = (void *) ((u8 *) gtpu1 - sizeof (udp_header_t) -
191 sizeof (ip4_header_t));
195 ip6_0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t) -
196 sizeof (ip6_header_t));
197 ip6_1 = (void *) ((u8 *) gtpu1 - sizeof (udp_header_t) -
198 sizeof (ip6_header_t));
200 udp0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t));
201 udp1 = (void *) ((u8 *) gtpu1 - sizeof (udp_header_t));
209 /* speculatively load gtp header version field */
210 ver0 = gtpu0->ver_flags;
211 ver1 = gtpu1->ver_flags;
214 * Manipulate gtpu header
215 * TBD: Manipulate Sequence Number and N-PDU Number
216 * TBD: Manipulate Next Extension Header
219 /* Perform all test assuming the packet has the needed space.
220 * Check if version 1, not PT, not reserved.
221 * Check message type 255.
224 ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
225 (GTPU_V1_VER | GTPU_PT_BIT));
/* type 255 = G-PDU (encapsulated user data); anything else is
 * signalling and leaves the fast track */
226 is_fast_track0 = is_fast_track0 & (gtpu0->type == 255);
229 ((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
230 (GTPU_V1_VER | GTPU_PT_BIT));
231 is_fast_track1 = is_fast_track1 & (gtpu1->type == 255);
233 /* Make the header overlap the end of the gtpu_header_t, so
234 * that it starts with the same Next extension header as the
236 * This means that the gtpu_ext_header_t (ext) has the type
237 * from the previous header and the length from the current one.
238 * Works both for the first gtpu_header_t and all following
239 * gtpu_ext_header_t extensions.
240 * Copy the ext data if the E bit is set, else use the 0 value.
/* NOTE(review): ">pu0->..." below is entity-mangled
 * "&gtpu0->next_ext_type" — verify against upstream */
242 ext0 = (ver0 & GTPU_E_BIT) ?
243 (gtpu_ext_header_t *) >pu0->next_ext_type :
245 ext1 = (ver1 & GTPU_E_BIT) ?
246 (gtpu_ext_header_t *) >pu1->next_ext_type :
249 /* One or more of the E, S and PN flags are set, so all 3 fields
251 * The gtpu_header_t contains the Sequence number, N-PDU number and
252 * Next extension header type.
253 * If E is not set subtract 4 bytes from the header.
254 * Then add the length of the extension. 0 * 4 if E is not set,
255 * else it's the ext->len from the gtp extension. Length is multiple
257 * Note: This length is only valid if the header itself is valid,
258 * so it must be verified before use.
260 gtpu_hdr_len0 = sizeof (gtpu_header_t) -
261 (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) +
263 gtpu_hdr_len1 = sizeof (gtpu_header_t) -
264 (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4) +
267 /* Get the next extension, unconditionally.
268 * If E was not set in the gtp header ext->len is zero.
269 * If E was set ext0 will now point to the packet buffer.
270 * If the gtp packet is illegal this might point outside the buffer.
271 * TBD check the updated for ext0->type != 0, and continue removing
272 * extensions. Only for clarity, will be optimized away.
274 ext0 += ext0->len * 4 / sizeof (*ext0);
275 ext1 += ext1->len * 4 / sizeof (*ext1);
277 /* Check the space, if this is true then ext0 points to a valid
278 * location in the buffer as well.
280 has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
281 has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
283 /* Diverge the packet paths for 0 and 1 */
284 if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0)))
286 /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */
288 /* GCC will hopefully fix the duplicate compute */
290 !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
291 (GTPU_V1_VER | GTPU_PT_BIT)) |
294 /* The header or size is wrong */
296 has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
297 next0 = GTPU_INPUT_NEXT_DROP;
299 /* This is an unsupported/bad packet.
300 * Check if it is to be forwarded.
303 tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv4;
305 tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv6;
/* ~0 means no fallback tunnel configured: keep the DROP next */
307 if (PREDICT_FALSE (tunnel_index0 != ~0))
312 /* Correct version and has the space. It can only be unknown
315 error0 = GTPU_ERROR_UNSUPPORTED_TYPE;
316 next0 = GTPU_INPUT_NEXT_DROP;
318 /* This is an error/nonstandard packet
319 * Check if it is to be forwarded. */
321 tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv4;
323 tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv6;
325 if (PREDICT_FALSE (tunnel_index0 != ~0))
328 /* The packet is ipv6/not forwarded */
332 /* Manipulate packet 0 */
334 key4_0.src = ip4_0->src_address.as_u32;
335 key4_0.teid = gtpu0->teid;
337 /* Make sure GTPU tunnel exist according to packet SourceIP and
338 * teid SourceIP identify a GTPU path, and teid identify a tunnel
339 * in a given GTPU path */
340 if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
342 p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
343 if (PREDICT_FALSE (p0 == NULL))
345 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
346 next0 = GTPU_INPUT_NEXT_DROP;
347 /* This is a standard packet, but no tunnel was found.
348 * Check if it is to be forwarded. */
350 gtm->unknown_teid_forward_tunnel_index_ipv4;
351 if (PREDICT_FALSE (tunnel_index0 != ~0))
/* cache hit path below: remember key -> tunnel for the next packet */
355 last_key4.as_u64 = key4_0.as_u64;
356 tunnel_index0 = last_tunnel_index = p0[0];
358 else // when the address of the packet is the same as the packet
359 // before ... saving lookup in table
360 tunnel_index0 = last_tunnel_index;
361 // tunnel index in vpp
362 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
364 /* Validate GTPU tunnel encap-fib index against packet */
365 if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
367 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
368 next0 = GTPU_INPUT_NEXT_DROP;
369 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4;
370 if (PREDICT_FALSE (tunnel_index0 != ~0))
375 /* Validate GTPU tunnel SourceIP against packet DestinationIP */
376 if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
377 goto next0; /* valid packet */
378 if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
380 key4_0.src = ip4_0->dst_address.as_u32;
381 key4_0.teid = gtpu0->teid;
382 /* Make sure mcast GTPU tunnel exist by packet DIP and teid */
383 p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
384 if (PREDICT_TRUE (p0 != NULL))
386 mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
387 goto next0; /* valid packet */
390 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
391 next0 = GTPU_INPUT_NEXT_DROP;
392 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4;
393 if (PREDICT_FALSE (tunnel_index0 != ~0))
397 } else /* !is_ip4 */ {
398 key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
399 key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
400 key6_0.teid = gtpu0->teid;
402 /* Make sure GTPU tunnel exist according to packet SIP and teid
403 * SIP identify a GTPU path, and teid identify a tunnel in a given GTPU path */
404 if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
406 p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
407 if (PREDICT_FALSE (p0 == NULL))
409 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
410 next0 = GTPU_INPUT_NEXT_DROP;
411 /* This is a standard packet, but no tunnel was found.
412 * Check if it is to be forwarded. */
414 gtm->unknown_teid_forward_tunnel_index_ipv6;
415 if (PREDICT_FALSE (tunnel_index0 != ~0))
419 clib_memcpy_fast (&last_key6, &key6_0, sizeof (key6_0));
420 tunnel_index0 = last_tunnel_index = p0[0];
423 tunnel_index0 = last_tunnel_index;
424 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
426 /* Validate GTPU tunnel encap-fib index against packet */
427 if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
429 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
430 next0 = GTPU_INPUT_NEXT_DROP;
431 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6;
432 if (PREDICT_FALSE (tunnel_index0 != ~0))
437 /* Validate GTPU tunnel SIP against packet DIP */
438 if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
440 goto next0; /* valid packet */
441 if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
443 key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
444 key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
445 key6_0.teid = gtpu0->teid;
446 p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
447 if (PREDICT_TRUE (p0 != NULL))
449 mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
450 goto next0; /* valid packet */
453 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
454 next0 = GTPU_INPUT_NEXT_DROP;
455 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6;
456 if (PREDICT_FALSE (tunnel_index0 != ~0))
/* forward path (reached via goto from the error branches above):
 * re-target the packet at the fallback tunnel instead of dropping */
462 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
464 /* Validate GTPU tunnel encap-fib index against packet */
465 if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
467 error0 = GTPU_ERROR_NO_ERROR_TUNNEL;
468 next0 = GTPU_INPUT_NEXT_DROP;
472 /* Clear the error, next0 will be overwritten by the tunnel */
477 /* Forward packet instead. Push the IP+UDP header */
479 -(i32) (sizeof (udp_header_t) + sizeof (ip4_header_t));
480 /* Backup the IP4 checksum and address */
481 sum0 = ip4_0->checksum;
482 old0 = ip4_0->dst_address.as_u32;
484 /* Update IP address of the packet using the src from the tunnel
486 ip4_0->dst_address.as_u32 = t0->src.ip4.as_u32;
488 /* Fix the IP4 checksum */
489 sum0 = ip_csum_update (sum0, old0, ip4_0->dst_address.as_u32,
491 dst_address /* changed member */);
492 ip4_0->checksum = ip_csum_fold (sum0);
496 /* Forward packet instead. Push the IP+UDP header */
498 -(i32) (sizeof (udp_header_t) + sizeof (ip6_header_t));
499 /* IPv6 UDP checksum is mandatory */
502 ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus);
/* RFC 768/8200: a computed zero UDP checksum is transmitted as 0xffff */
503 if (udp0->checksum == 0)
504 udp0->checksum = 0xffff;
508 /* Pop/Remove gtpu header from buffered package or push existing
509 * IP+UDP header back to the buffer*/
510 vlib_buffer_advance (b0, gtpu_hdr_len0);
512 // where does it need to go in the graph next
513 next0 = t0->decap_next_index;
514 // interface index the package is on
515 sw_if_index0 = t0->sw_if_index;
516 len0 = vlib_buffer_length_in_chain (vm, b0);
518 // Next three lines are for forwarding the payload to L2
520 /* Required to make the l2 tag push / pop code work on l2 subifs */
521 if (PREDICT_TRUE (next0 == GTPU_INPUT_NEXT_L2_INPUT))
522 vnet_update_l2_len (b0);
524 /* Set packet input sw_if_index to unicast GTPU tunnel for learning
526 vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
527 // in case its a multicast packet set different interface index
528 sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
532 stats_n_packets += 1;
533 stats_n_bytes += len0;
535 /* Batch stats increment on the same gtpu tunnel so counter
536 is not incremented per packet */
537 if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
539 stats_n_packets -= 1;
540 stats_n_bytes -= len0;
542 vlib_increment_combined_counter
543 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
544 thread_index, stats_sw_if_index,
545 stats_n_packets, stats_n_bytes);
547 stats_n_bytes = len0;
548 stats_sw_if_index = sw_if_index0;
/* trace/error epilogue for packet 0 */
552 b0->error = error0 ? node->errors[error0] : 0;
554 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
557 = vlib_add_trace (vm, node, b0, sizeof (*tr));
558 tr->next_index = next0;
560 tr->tunnel_index = tunnel_index0;
561 tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
/* 4 bytes = the mandatory GTP header prefix (flags/type/length) */
563 if (vlib_buffer_has_space (b0, 4))
565 tr->header.ver_flags = gtpu0->ver_flags;
566 tr->header.type = gtpu0->type;
567 tr->header.length = clib_net_to_host_u16 (gtpu0->length);
571 /* End of processing for packet 0, start for packet 1 */
572 if (PREDICT_FALSE ((!is_fast_track1) | (!has_space1)))
574 /* Not fast path. ext1 and gtpu_hdr_len1 might be wrong */
576 /* GCC will hopefully fix the duplicate compute */
578 !((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
579 (GTPU_V1_VER | GTPU_PT_BIT)) |
582 /* The header or size is wrong */
584 has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
585 next1 = GTPU_INPUT_NEXT_DROP;
587 /* This is an unsupported/bad packet.
588 * Check if it is to be forwarded.
591 tunnel_index1 = gtm->bad_header_forward_tunnel_index_ipv4;
593 tunnel_index1 = gtm->bad_header_forward_tunnel_index_ipv6;
595 if (PREDICT_FALSE (tunnel_index1 != ~0))
600 /* Correct version and has the space. It can only be unknown
603 error1 = GTPU_ERROR_UNSUPPORTED_TYPE;
604 next1 = GTPU_INPUT_NEXT_DROP;
606 /* This is an error/nonstandard packet
607 * Check if it is to be forwarded. */
609 tunnel_index1 = gtm->unknown_type_forward_tunnel_index_ipv4;
611 tunnel_index1 = gtm->unknown_type_forward_tunnel_index_ipv6;
613 if (PREDICT_FALSE (tunnel_index1 != ~0))
616 /* The packet is ipv6/not forwarded */
620 /* Manipulate packet 1 */
622 key4_1.src = ip4_1->src_address.as_u32;
623 key4_1.teid = gtpu1->teid;
625 /* Make sure GTPU tunnel exist according to packet SIP and teid
626 * SIP identify a GTPU path, and teid identify a tunnel in a given GTPU path */
627 if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
629 p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
630 if (PREDICT_FALSE (p1 == NULL))
632 error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
633 next1 = GTPU_INPUT_NEXT_DROP;
635 gtm->unknown_teid_forward_tunnel_index_ipv4;
636 if (PREDICT_FALSE (tunnel_index1 != ~0))
640 last_key4.as_u64 = key4_1.as_u64;
641 tunnel_index1 = last_tunnel_index = p1[0];
644 tunnel_index1 = last_tunnel_index;
645 t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
647 /* Validate GTPU tunnel encap-fib index against packet */
648 if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
650 error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
651 next1 = GTPU_INPUT_NEXT_DROP;
652 tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv4;
653 if (PREDICT_FALSE (tunnel_index1 != ~0))
658 /* Validate GTPU tunnel SIP against packet DIP */
659 if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
660 goto next1; /* valid packet */
661 if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
663 key4_1.src = ip4_1->dst_address.as_u32;
664 key4_1.teid = gtpu1->teid;
665 /* Make sure mcast GTPU tunnel exist by packet DIP and teid */
666 p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
667 if (PREDICT_TRUE (p1 != NULL))
669 mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
670 goto next1; /* valid packet */
673 error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
674 next1 = GTPU_INPUT_NEXT_DROP;
675 tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv4;
676 if (PREDICT_FALSE (tunnel_index1 != ~0))
680 } else /* !is_ip4 */ {
681 key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
682 key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
683 key6_1.teid = gtpu1->teid;
685 /* Make sure GTPU tunnel exist according to packet SIP and teid
686 * SIP identify a GTPU path, and teid identify a tunnel in a given GTPU path */
687 if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
689 p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
691 if (PREDICT_FALSE (p1 == NULL))
693 error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
694 next1 = GTPU_INPUT_NEXT_DROP;
696 gtm->unknown_teid_forward_tunnel_index_ipv6;
697 if (PREDICT_FALSE (tunnel_index1 != ~0))
702 clib_memcpy_fast (&last_key6, &key6_1, sizeof (key6_1));
703 tunnel_index1 = last_tunnel_index = p1[0];
706 tunnel_index1 = last_tunnel_index;
707 t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
709 /* Validate GTPU tunnel encap-fib index against packet */
710 if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
712 error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
713 next1 = GTPU_INPUT_NEXT_DROP;
714 tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv6;
715 if (PREDICT_FALSE (tunnel_index1 != ~0))
720 /* Validate GTPU tunnel SIP against packet DIP */
721 if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
723 goto next1; /* valid packet */
724 if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
726 key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
727 key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
728 key6_1.teid = gtpu1->teid;
729 p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
730 if (PREDICT_TRUE (p1 != NULL))
732 mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
733 goto next1; /* valid packet */
736 error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
737 next1 = GTPU_INPUT_NEXT_DROP;
738 tunnel_index1 = gtm->unknown_teid_forward_tunnel_index_ipv6;
739 if (PREDICT_FALSE (tunnel_index1 != ~0))
/* forward path for packet 1 (mirror of packet 0 above) */
746 t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
748 /* Validate GTPU tunnel encap-fib index against packet */
749 if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
751 error1 = GTPU_ERROR_NO_ERROR_TUNNEL;
752 next1 = GTPU_INPUT_NEXT_DROP;
756 /* Clear the error, next0 will be overwritten by the tunnel */
761 /* Forward packet instead. Push the IP+UDP header */
763 -(i32) (sizeof (udp_header_t) + sizeof (ip4_header_t));
765 /* Backup the IP4 checksum and address */
766 sum1 = ip4_1->checksum;
767 old1 = ip4_1->dst_address.as_u32;
769 /* Update IP address of the packet using the src from the tunnel
771 ip4_1->dst_address.as_u32 = t1->src.ip4.as_u32;
773 /* Fix the IP4 checksum */
774 sum1 = ip_csum_update (sum1, old1, ip4_1->dst_address.as_u32,
776 dst_address /* changed member */);
777 ip4_1->checksum = ip_csum_fold (sum1);
781 /* Forward packet instead. Push the IP+UDP header */
783 -(i32) (sizeof (udp_header_t) + sizeof (ip6_header_t));
785 /* IPv6 UDP checksum is mandatory */
788 ip6_tcp_udp_icmp_compute_checksum (vm, b1, ip6_1, &bogus);
789 if (udp1->checksum == 0)
790 udp1->checksum = 0xffff;
794 /* Pop gtpu header / push IP+UDP header */
795 vlib_buffer_advance (b1, gtpu_hdr_len1);
797 next1 = t1->decap_next_index;
798 sw_if_index1 = t1->sw_if_index;
799 len1 = vlib_buffer_length_in_chain (vm, b1);
801 /* Required to make the l2 tag push / pop code work on l2 subifs */
802 if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
803 vnet_update_l2_len (b1);
805 /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
806 vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
807 sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;
809 pkts_decapsulated ++;
810 stats_n_packets += 1;
811 stats_n_bytes += len1;
813 /* Batch stats increment on the same gtpu tunnel so counter
814 is not incremented per packet */
815 if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
817 stats_n_packets -= 1;
818 stats_n_bytes -= len1;
820 vlib_increment_combined_counter
821 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
822 thread_index, stats_sw_if_index,
823 stats_n_packets, stats_n_bytes);
825 stats_n_bytes = len1;
826 stats_sw_if_index = sw_if_index1;
/* trace/error epilogue for packet 1 */
830 b1->error = error1 ? node->errors[error1] : 0;
832 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
835 = vlib_add_trace (vm, node, b1, sizeof (*tr));
836 tr->next_index = next1;
838 tr->tunnel_index = tunnel_index1;
839 tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
840 if (vlib_buffer_has_space (b1, 4))
842 tr->header.ver_flags = gtpu1->ver_flags;
843 tr->header.type = gtpu1->type;
844 tr->header.length = clib_net_to_host_u16 (gtpu1->length);
/* validate-and-enqueue both packets, fixing up frames if next
 * indices diverge from the speculated next node */
848 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
849 to_next, n_left_to_next,
850 bi0, bi1, next0, next1);
853 /* In case there are less than 4 packets left in frame and packets in
854 current frame aka single processing */
855 while (n_left_from > 0 && n_left_to_next > 0)
860 ip4_header_t * ip4_0;
861 ip6_header_t * ip6_0;
862 gtpu_header_t * gtpu0;
866 gtpu_tunnel_t * t0, * mt0 = NULL;
867 gtpu4_tunnel_key_t key4_0;
868 gtpu6_tunnel_key_t key6_0;
870 u32 sw_if_index0, len0;
876 gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 };
877 gtpu_ext_header_t *ext0;
888 b0 = vlib_get_buffer (vm, bi0);
890 /* udp leaves current_data pointing at the gtpu header */
891 gtpu0 = vlib_buffer_get_current (b0);
893 ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
895 ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
897 udp0 = (void *) ((u8 *) gtpu0 - sizeof (udp_header_t));
902 /* speculatively load gtp header version field */
903 ver0 = gtpu0->ver_flags;
905 * Manipulate gtpu header
906 * TBD: Manipulate Sequence Number and N-PDU Number
907 * TBD: Manipulate Next Extension Header
/* same fast-track test as the dual loop: V1, PT set, reserved clear,
 * message type 255 (G-PDU) */
911 ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
912 (GTPU_V1_VER | GTPU_PT_BIT));
913 is_fast_track0 = is_fast_track0 & (gtpu0->type == 255);
/* NOTE(review): ">pu0->..." is entity-mangled "&gtpu0->next_ext_type" */
915 ext0 = (ver0 & GTPU_E_BIT) ?
916 (gtpu_ext_header_t *) >pu0->next_ext_type :
919 gtpu_hdr_len0 = sizeof (gtpu_header_t) -
920 (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) +
923 ext0 += ext0->len * 4 / sizeof (*ext0);
925 has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
927 if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0)))
929 /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */
931 /* GCC will hopefully fix the duplicate compute */
933 !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
934 (GTPU_V1_VER | GTPU_PT_BIT)) |
937 /* The header or size is wrong */
939 has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
940 next0 = GTPU_INPUT_NEXT_DROP;
942 /* This is an unsupported/bad packet.
943 * Check if it is to be forwarded.
946 tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv4;
948 tunnel_index0 = gtm->bad_header_forward_tunnel_index_ipv6;
950 if (PREDICT_FALSE (tunnel_index0 != ~0))
955 /* Correct version and has the space. It can only be unknown
958 error0 = GTPU_ERROR_UNSUPPORTED_TYPE;
959 next0 = GTPU_INPUT_NEXT_DROP;
961 /* This is an error/nonstandard packet
962 * Check if it is to be forwarded. */
964 tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv4;
966 tunnel_index0 = gtm->unknown_type_forward_tunnel_index_ipv6;
968 if (PREDICT_FALSE (tunnel_index0 != ~0))
971 /* The packet is ipv6/not forwarded */
977 key4_0.src = ip4_0->src_address.as_u32;
978 key4_0.teid = gtpu0->teid;
980 /* Make sure GTPU tunnel exist according to packet SIP and teid
981 * SIP identify a GTPU path, and teid identify a tunnel in a
983 if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
985 // Cache miss, so try normal lookup now.
986 p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
987 if (PREDICT_FALSE (p0 == NULL))
989 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
990 next0 = GTPU_INPUT_NEXT_DROP;
992 /* This is a standard packet, but no tunnel was found.
993 * Check if it is to be forwarded. */
995 gtm->unknown_teid_forward_tunnel_index_ipv4;
996 if (PREDICT_FALSE (tunnel_index0 != ~0))
1000 // Update the key/tunnel cache for normal packets
1001 last_key4.as_u64 = key4_0.as_u64;
1002 tunnel_index0 = last_tunnel_index = p0[0];
1005 tunnel_index0 = last_tunnel_index;
1006 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1008 /* Validate GTPU tunnel encap-fib index against packet */
1009 if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
1011 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
1012 next0 = GTPU_INPUT_NEXT_DROP;
1013 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4;
1014 if (PREDICT_FALSE (tunnel_index0 != ~0))
1019 /* Validate GTPU tunnel SIP against packet DIP */
1020 if (PREDICT_TRUE (ip4_0->dst_address.as_u32 ==
1021 t0->src.ip4.as_u32))
1022 goto next00; /* valid packet */
1024 ip4_address_is_multicast (&ip4_0->dst_address)))
1026 key4_0.src = ip4_0->dst_address.as_u32;
1027 key4_0.teid = gtpu0->teid;
1028 /* Make sure mcast GTPU tunnel exist by packet DIP and teid
1030 p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
1031 if (PREDICT_TRUE (p0 != NULL))
1033 mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
1034 goto next00; /* valid packet */
1037 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
1038 next0 = GTPU_INPUT_NEXT_DROP;
1039 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv4;
1040 if (PREDICT_FALSE (tunnel_index0 != ~0))
1046 key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
1047 key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
1048 key6_0.teid = gtpu0->teid;
1050 /* Make sure GTPU tunnel exist according to packet SIP and teid
1051 * SIP identify a GTPU path, and teid identify a tunnel in a
1052 * given GTPU path */
1054 memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
1056 p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
1057 if (PREDICT_FALSE (p0 == NULL))
1059 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
1060 next0 = GTPU_INPUT_NEXT_DROP;
1062 gtm->unknown_teid_forward_tunnel_index_ipv6;
1063 if (PREDICT_FALSE (tunnel_index0 != ~0))
1067 clib_memcpy_fast (&last_key6, &key6_0, sizeof (key6_0));
1068 tunnel_index0 = last_tunnel_index = p0[0];
1071 tunnel_index0 = last_tunnel_index;
1072 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1074 /* Validate GTPU tunnel encap-fib index against packet */
1075 if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
1077 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
1078 next0 = GTPU_INPUT_NEXT_DROP;
1079 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6;
1080 if (PREDICT_FALSE (tunnel_index0 != ~0))
1085 /* Validate GTPU tunnel SIP against packet DIP */
1087 ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
1088 goto next00; /* valid packet */
1089 if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
1091 key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
1092 key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
1093 key6_0.teid = gtpu0->teid;
1094 p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
1095 if (PREDICT_TRUE (p0 != NULL))
1097 mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
1098 goto next00; /* valid packet */
1101 error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
1102 next0 = GTPU_INPUT_NEXT_DROP;
1103 tunnel_index0 = gtm->unknown_teid_forward_tunnel_index_ipv6;
1104 if (PREDICT_FALSE (tunnel_index0 != ~0))
1109 /* This can only be reached via goto */
1112 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1114 /* Validate GTPU tunnel encap-fib index against packet */
1115 if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
1117 error0 = GTPU_ERROR_NO_ERROR_TUNNEL;
1118 next0 = GTPU_INPUT_NEXT_DROP;
1122 /* Clear the error, next0 will be overwritten by the tunnel */
1127 /* Forward packet instead. Push the IP+UDP header */
1129 -(i32) (sizeof (udp_header_t) + sizeof (ip4_header_t));
1130 /* Backup the IP4 checksum and address */
1131 sum0 = ip4_0->checksum;
1132 old0 = ip4_0->dst_address.as_u32;
1134 /* Update IP address of the packet using the src from the tunnel
1136 ip4_0->dst_address.as_u32 = t0->src.ip4.as_u32;
1138 /* Fix the IP4 checksum */
1139 sum0 = ip_csum_update (sum0, old0, ip4_0->dst_address.as_u32,
1141 dst_address /* changed member */);
1142 ip4_0->checksum = ip_csum_fold (sum0);
1146 /* Forward packet instead. Push the IP+UDP header */
1148 -(i32) (sizeof (udp_header_t) + sizeof (ip6_header_t));
1150 /* IPv6 UDP checksum is mandatory */
1153 ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus);
1154 if (udp0->checksum == 0)
1155 udp0->checksum = 0xffff;
1159 /* Pop gtpu header / push IP+UDP header */
1160 vlib_buffer_advance (b0, gtpu_hdr_len0);
1162 next0 = t0->decap_next_index;
1163 sw_if_index0 = t0->sw_if_index;
1164 len0 = vlib_buffer_length_in_chain (vm, b0);
1166 /* Required to make the l2 tag push / pop code work on l2 subifs */
1167 if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
1168 vnet_update_l2_len (b0);
1170 /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1171 vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
1172 sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
1174 pkts_decapsulated ++;
1175 stats_n_packets += 1;
1176 stats_n_bytes += len0;
1178 /* Batch stats increment on the same gtpu tunnel so counter
1179 is not incremented per packet */
1180 if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
1182 stats_n_packets -= 1;
1183 stats_n_bytes -= len0;
1184 if (stats_n_packets)
1185 vlib_increment_combined_counter
1186 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1187 thread_index, stats_sw_if_index,
1188 stats_n_packets, stats_n_bytes);
1189 stats_n_packets = 1;
1190 stats_n_bytes = len0;
1191 stats_sw_if_index = sw_if_index0;
1195 b0->error = error0 ? node->errors[error0] : 0;
1197 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1200 = vlib_add_trace (vm, node, b0, sizeof (*tr));
1201 tr->next_index = next0;
1203 tr->tunnel_index = tunnel_index0;
1204 tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
1205 if (vlib_buffer_has_space (b0, 4))
1207 tr->header.ver_flags = gtpu0->ver_flags;
1208 tr->header.type = gtpu0->type;
1209 tr->header.length = clib_net_to_host_u16 (gtpu0->length);
1212 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1213 to_next, n_left_to_next,
1217 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1219 /* Do we still need this now that tunnel tx stats is kept? */
1220 vlib_node_increment_counter (vm, is_ip4?
1221 gtpu4_input_node.index:gtpu6_input_node.index,
1222 GTPU_ERROR_DECAPSULATED,
1225 /* Increment any remaining batch stats */
1226 if (stats_n_packets)
1228 vlib_increment_combined_counter
1229 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1230 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
/* persist the last interface so the next frame can keep batching */
1231 node->runtime_data[0] = stats_sw_if_index;
1234 return from_frame->n_vectors;
1237 VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
1238 vlib_node_runtime_t * node,
1239 vlib_frame_t * from_frame)
1241 return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
1244 VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
1245 vlib_node_runtime_t * node,
1246 vlib_frame_t * from_frame)
1248 return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
1251 static char * gtpu_error_strings[] = {
1252 #define gtpu_error(n,s) s,
1253 #include <gtpu/gtpu_error.def>
1258 VLIB_REGISTER_NODE (gtpu4_input_node) = {
1259 .name = "gtpu4-input",
1260 /* Takes a vector of packets. */
1261 .vector_size = sizeof (u32),
1263 .n_errors = GTPU_N_ERROR,
1264 .error_strings = gtpu_error_strings,
1266 .n_next_nodes = GTPU_INPUT_N_NEXT,
1268 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
1269 foreach_gtpu_input_next
1273 //temp .format_buffer = format_gtpu_header,
1274 .format_trace = format_gtpu_rx_trace,
1275 // $$$$ .unformat_buffer = unformat_gtpu_header,
1278 VLIB_REGISTER_NODE (gtpu6_input_node) = {
1279 .name = "gtpu6-input",
1280 /* Takes a vector of packets. */
1281 .vector_size = sizeof (u32),
1283 .n_errors = GTPU_N_ERROR,
1284 .error_strings = gtpu_error_strings,
1286 .n_next_nodes = GTPU_INPUT_N_NEXT,
1288 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
1289 foreach_gtpu_input_next
1293 //temp .format_buffer = format_gtpu_header,
1294 .format_trace = format_gtpu_rx_trace,
1295 // $$$$ .unformat_buffer = unformat_gtpu_header,
/* Next nodes for the ip4/ip6 gtpu-bypass feature nodes.
   NOTE(review): the typedef name looks like a "vxlan" typo inherited from
   the vxlan plugin this file was derived from; renaming would touch any
   external users, so it is kept as-is. */
typedef enum
{
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_vxan_bypass_next_t;
1304 /* this function determines if a udp packet is actually gtpu and needs
1305 forwarding to gtpu_input */
/* NOTE(review): this chunk appears to be a lossy extraction of the original
   file -- structural lines (braces, is_ip4/#else branches) are missing, and
   "&gt" sequences were mangled into ">" (">pu_main" is presumably
   "&gtpu_main", ">m->vtep_table" is "&gtm->vtep_table").  Only comments are
   added below; code bytes are left untouched. */
/* Runs as an ip4/ip6 feature.  UDP packets to port UDP_DST_PORT_GTPU whose
   destination matches a local VTEP are diverted to gtpu4/6-input; everything
   else continues to the next feature node untouched. */
1307 ip_gtpu_bypass_inline (vlib_main_t * vm,
1308 vlib_node_runtime_t * node,
1309 vlib_frame_t * frame,
1312 gtpu_main_t * gtm = >pu_main;
1313 u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
1314 vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
1315 vtep4_key_t last_vtep4; /* last IPv4 address / fib index
1316 matching a local VTEP address */
1317 vtep6_key_t last_vtep6; /* last IPv6 address / fib index
1318 matching a local VTEP address */
1319 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
1321 from = vlib_frame_vector_args (frame);
1322 n_left_from = frame->n_vectors;
1323 next_index = node->cached_next_index;
1324 vlib_get_buffers (vm, from, bufs, n_left_from);
1326 if (node->flags & VLIB_NODE_FLAG_TRACE)
1327 ip4_forward_next_trace (vm, node, frame, VLIB_TX);
/* Seed the one-entry VTEP cache so the first lookup always misses. */
1330 vtep4_key_init (&last_vtep4);
1332 vtep6_key_init (&last_vtep6);
1334 while (n_left_from > 0)
1336 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual-packet loop: classify two packets per iteration. */
1338 while (n_left_from >= 4 && n_left_to_next >= 2)
1340 vlib_buffer_t * b0, * b1;
1341 ip4_header_t * ip40, * ip41;
1342 ip6_header_t * ip60, * ip61;
1343 udp_header_t * udp0, * udp1;
1344 u32 bi0, ip_len0, udp_len0, flags0, next0;
1345 u32 bi1, ip_len1, udp_len1, flags1, next1;
1346 i32 len_diff0, len_diff1;
1347 u8 error0, good_udp0, proto0;
1348 u8 error1, good_udp1, proto1;
1350 /* Prefetch next iteration. */
1352 vlib_prefetch_buffer_header (b[2], LOAD);
1353 vlib_prefetch_buffer_header (b[3], LOAD);
1355 CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
1356 CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
1359 bi0 = to_next[0] = from[0];
1360 bi1 = to_next[1] = from[1];
1364 n_left_to_next -= 2;
1371 ip40 = vlib_buffer_get_current (b0);
1372 ip41 = vlib_buffer_get_current (b1);
1376 ip60 = vlib_buffer_get_current (b0);
1377 ip61 = vlib_buffer_get_current (b1);
1380 /* Setup packet for next IP feature */
1381 vnet_feature_next(&next0, b0);
1382 vnet_feature_next(&next1, b1);
1386 /* Treat IP frag packets as "experimental" protocol for now
1387 until support of IP frag reassembly is implemented */
1388 proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
1389 proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
1393 proto0 = ip60->protocol;
1394 proto1 = ip61->protocol;
1397 /* Process packet 0 */
1398 if (proto0 != IP_PROTOCOL_UDP)
1399 goto exit0; /* not UDP packet */
1402 udp0 = ip4_next_header (ip40);
1404 udp0 = ip6_next_header (ip60);
1406 if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
1407 goto exit0; /* not GTPU packet */
1409 /* Validate DIP against VTEPs*/
1412 #ifdef CLIB_HAVE_VEC512
1413 if (!vtep4_check_vector (>m->vtep_table, b0, ip40, &last_vtep4,
1416 if (!vtep4_check (>m->vtep_table, b0, ip40, &last_vtep4))
1418 goto exit0; /* no local VTEP for GTPU packet */
1422 if (!vtep6_check (>m->vtep_table, b0, ip60, &last_vtep6))
1423 goto exit0; /* no local VTEP for GTPU packet */
1427 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1429 /* Don't verify UDP checksum for packets with explicit zero checksum. */
1430 good_udp0 |= udp0->checksum == 0;
1432 /* Verify UDP length */
1434 ip_len0 = clib_net_to_host_u16 (ip40->length);
1436 ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1437 udp_len0 = clib_net_to_host_u16 (udp0->length);
1438 len_diff0 = ip_len0 - udp_len0;
1440 /* Verify UDP checksum */
1441 if (PREDICT_FALSE (!good_udp0))
1443 if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1446 flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1448 flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1450 (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1456 error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1457 error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1461 error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1462 error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
/* Good GTPU packets are diverted to the gtpu input node; bad ones drop. */
1466 IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1467 b0->error = error0 ? error_node->errors[error0] : 0;
1469 /* gtpu-input node expect current at GTPU header */
1471 vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
1473 vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
1476 /* Process packet 1 */
1477 if (proto1 != IP_PROTOCOL_UDP)
1478 goto exit1; /* not UDP packet */
1481 udp1 = ip4_next_header (ip41);
1483 udp1 = ip6_next_header (ip61);
1485 if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
1486 goto exit1; /* not GTPU packet */
1488 /* Validate DIP against VTEPs*/
1491 #ifdef CLIB_HAVE_VEC512
1492 if (!vtep4_check_vector (>m->vtep_table, b1, ip41, &last_vtep4,
1495 if (!vtep4_check (>m->vtep_table, b1, ip41, &last_vtep4))
1497 goto exit1; /* no local VTEP for GTPU packet */
1501 if (!vtep6_check (>m->vtep_table, b1, ip61, &last_vtep6))
1502 goto exit1; /* no local VTEP for GTPU packet */
1506 good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1508 /* Don't verify UDP checksum for packets with explicit zero checksum. */
1509 good_udp1 |= udp1->checksum == 0;
1511 /* Verify UDP length */
1513 ip_len1 = clib_net_to_host_u16 (ip41->length);
1515 ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
1516 udp_len1 = clib_net_to_host_u16 (udp1->length);
1517 len_diff1 = ip_len1 - udp_len1;
1519 /* Verify UDP checksum */
1520 if (PREDICT_FALSE (!good_udp1))
1522 if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1525 flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1527 flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1529 (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1535 error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1536 error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1540 error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1541 error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1545 IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1546 b1->error = error1 ? error_node->errors[error1] : 0;
1548 /* gtpu-input node expect current at GTPU header */
1550 vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
1552 vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
1555 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1556 to_next, n_left_to_next,
1557 bi0, bi1, next0, next1);
/* Single-packet tail loop -- identical checks to the dual loop above. */
1560 while (n_left_from > 0 && n_left_to_next > 0)
1563 ip4_header_t * ip40;
1564 ip6_header_t * ip60;
1565 udp_header_t * udp0;
1566 u32 bi0, ip_len0, udp_len0, flags0, next0;
1568 u8 error0, good_udp0, proto0;
1570 bi0 = to_next[0] = from[0];
1574 n_left_to_next -= 1;
1579 ip40 = vlib_buffer_get_current (b0);
1581 ip60 = vlib_buffer_get_current (b0);
1583 /* Setup packet for next IP feature */
1584 vnet_feature_next(&next0, b0);
1587 /* Treat IP4 frag packets as "experimental" protocol for now
1588 until support of IP frag reassembly is implemented */
1589 proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
1591 proto0 = ip60->protocol;
1593 if (proto0 != IP_PROTOCOL_UDP)
1594 goto exit; /* not UDP packet */
1597 udp0 = ip4_next_header (ip40);
1599 udp0 = ip6_next_header (ip60);
1601 if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
1602 goto exit; /* not GTPU packet */
1604 /* Validate DIP against VTEPs*/
1607 #ifdef CLIB_HAVE_VEC512
1608 if (!vtep4_check_vector (>m->vtep_table, b0, ip40, &last_vtep4,
1611 if (!vtep4_check (>m->vtep_table, b0, ip40, &last_vtep4))
1613 goto exit; /* no local VTEP for GTPU packet */
1617 if (!vtep6_check (>m->vtep_table, b0, ip60, &last_vtep6))
1618 goto exit; /* no local VTEP for GTPU packet */
1622 good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1624 /* Don't verify UDP checksum for packets with explicit zero checksum. */
1625 good_udp0 |= udp0->checksum == 0;
1627 /* Verify UDP length */
1629 ip_len0 = clib_net_to_host_u16 (ip40->length);
1631 ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1632 udp_len0 = clib_net_to_host_u16 (udp0->length);
1633 len_diff0 = ip_len0 - udp_len0;
1635 /* Verify UDP checksum */
1636 if (PREDICT_FALSE (!good_udp0))
1638 if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1641 flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1643 flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1645 (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1651 error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1652 error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1656 error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1657 error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1661 IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1662 b0->error = error0 ? error_node->errors[error0] : 0;
1664 /* gtpu-input node expect current at GTPU header */
1666 vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
1668 vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
1671 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1672 to_next, n_left_to_next,
1676 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1679 return frame->n_vectors;
1682 VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
1683 vlib_node_runtime_t * node,
1684 vlib_frame_t * frame)
1686 return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1689 VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
1690 .name = "ip4-gtpu-bypass",
1691 .vector_size = sizeof (u32),
1693 .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
1695 [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
1696 [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
1699 .format_buffer = format_ip4_header,
1700 .format_trace = format_ip4_forward_next_trace,
1703 #ifndef CLIB_MARCH_VARIANT
1704 /* Dummy init function to get us linked in. */
1705 clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
1708 VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
1709 #endif /* CLIB_MARCH_VARIANT */
1711 VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
1712 vlib_node_runtime_t * node,
1713 vlib_frame_t * frame)
1715 return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1718 VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
1719 .name = "ip6-gtpu-bypass",
1720 .vector_size = sizeof (u32),
1722 .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
1724 [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
1725 [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
1728 .format_buffer = format_ip6_header,
1729 .format_trace = format_ip6_forward_next_trace,
1732 #ifndef CLIB_MARCH_VARIANT
1733 /* Dummy init function to get us linked in. */
1734 clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
1737 VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
/* Error taxonomy specific to the hardware flow-offload decap path
   (gtpu_flow_input); the generic gtpu errors are appended to these in
   gtpu_flow_error_t below. */
1739 #define foreach_gtpu_flow_error \
1740 _(NONE, "no error") \
1741 _(PAYLOAD_ERROR, "Payload type errors") \
1742 _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
1743 _(IP_HEADER_ERROR, "Rx ip header errors") \
1744 _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
1745 _(UDP_LENGTH_ERROR, "Rx udp length errors")
1749 #define _(f,s) GTPU_FLOW_ERROR_##f,
1750 foreach_gtpu_flow_error
1752 #define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
1753 #include <gtpu/gtpu_error.def>
1756 } gtpu_flow_error_t;
1758 static char *gtpu_flow_error_strings[] = {
1760 foreach_gtpu_flow_error
1762 #define gtpu_error(n,s) s,
1763 #include <gtpu/gtpu_error.def>
/* True when the UDP checksum still needs software validation: the stack has
   neither computed it nor flagged the buffer for HW UDP checksum offload. */
1769 #define gtpu_local_need_csum_check(_b) \
1770 (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED || \
1771 (_b->flags & VNET_BUFFER_F_OFFLOAD && \
1772 vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)))
/* True when the checksum is already known-good: either verified correct by
   the stack, or the NIC was asked to fill it in (HW offload flag set). */
1774 #define gtpu_local_csum_is_valid(_b) \
1775 ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT || \
1776 (_b->flags & VNET_BUFFER_F_OFFLOAD && \
1777 vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)) != 0)
1779 static_always_inline u8
1780 gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
1782 u32 flags = b->flags;
1783 enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t)};
1785 /* Verify UDP checksum */
1786 if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1788 vlib_buffer_advance (b, -offset);
1789 flags = ip4_tcp_udp_validate_checksum (vm, b);
1790 vlib_buffer_advance (b, offset);
1793 return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1796 static_always_inline u8
1797 gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
1799 ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
1800 sizeof(ip4_header_t) - sizeof(udp_header_t);
1801 u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
1802 u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
1803 return ip_len > expected || ip4_hdr->ttl == 0 || ip4_hdr->ip_version_and_header_length != 0x45;
1806 static_always_inline u8
1807 gtpu_check_ip_udp_len (vlib_buffer_t *b)
1809 ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
1810 sizeof(ip4_header_t) - sizeof(udp_header_t);
1811 udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
1812 u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
1813 u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
1814 return udp_len > ip_len;
1817 static_always_inline u8
1818 gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
1820 u8 error0 = GTPU_FLOW_ERROR_NONE;
1822 error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
1824 error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
1826 error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
/* Hardware flow-offload decap path: buffers arrive pre-matched to a tunnel
   via b->flow_id (stamped by the NIC flow director), so no TEID hash lookup
   is performed.  Because these packets bypassed ip4-input, IP/UDP header and
   checksum checks are redone here.
   NOTE(review): same extraction caveat as elsewhere in this chunk --
   structural lines are missing and ">pu..." spellings are mangled
   "&gt..." sequences.  Only comments are added; code bytes are untouched. */
1832 gtpu_flow_input (vlib_main_t * vm,
1833 vlib_node_runtime_t * node,
1834 vlib_frame_t * from_frame)
1836 u32 n_left_from, next_index, * from, * to_next;
1837 gtpu_main_t * gtm = >pu_main;
1838 vnet_main_t * vnm = gtm->vnet_main;
1839 vnet_interface_main_t * im = &vnm->interface_main;
1840 u32 pkts_decapsulated = 0;
1841 u32 thread_index = vlib_get_thread_index();
1842 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
1843 u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;
1845 from = vlib_frame_vector_args (from_frame);
1846 n_left_from = from_frame->n_vectors;
1848 next_index = node->cached_next_index;
1849 stats_sw_if_index = node->runtime_data[0];
1850 stats_n_packets = stats_n_bytes = 0;
1852 while (n_left_from > 0)
1856 vlib_get_next_frame (vm, node, next_index,
1857 to_next, n_left_to_next);
/* Dual-packet loop: two packets per iteration while at least 4 remain. */
1859 while (n_left_from >= 4 && n_left_to_next >= 2)
1862 vlib_buffer_t * b0, * b1;
1864 gtpu_header_t * gtpu0, * gtpu1;
1865 u32 gtpu_hdr_len0, gtpu_hdr_len1;
1866 u32 tunnel_index0, tunnel_index1;
1867 gtpu_tunnel_t * t0, * t1;
1869 u32 sw_if_index0, sw_if_index1, len0, len1;
1870 u8 has_space0 = 0, has_space1 = 0;
1872 gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 };
1873 gtpu_ext_header_t *ext0, *ext1;
1874 bool is_fast_track0, is_fast_track1;
1877 /* Prefetch next iteration. */
1879 vlib_buffer_t *p2, *p3;
1881 p2 = vlib_get_buffer (vm, from[2]);
1882 p3 = vlib_get_buffer (vm, from[3]);
1884 vlib_prefetch_buffer_header (p2, LOAD);
1885 vlib_prefetch_buffer_header (p3, LOAD);
1887 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
1888 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
1897 n_left_to_next -= 2;
1900 b0 = vlib_get_buffer (vm, bi0);
1901 b1 = vlib_get_buffer (vm, bi1);
1903 /* udp leaves current_data pointing at the gtpu header */
1904 gtpu0 = vlib_buffer_get_current (b0);
1905 gtpu1 = vlib_buffer_get_current (b1);
1907 len0 = vlib_buffer_length_in_chain (vm, b0);
1908 len1 = vlib_buffer_length_in_chain (vm, b1);
/* Re-validate IP/UDP headers skipped by the offload path. */
1916 ip_err0 = gtpu_check_ip (b0, len0);
1917 udp_err0 = gtpu_check_ip_udp_len (b0);
1918 ip_err1 = gtpu_check_ip (b1, len1);
1919 udp_err1 = gtpu_check_ip_udp_len (b1);
1921 if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
1922 csum_err0 = !gtpu_validate_udp_csum (vm, b0);
1924 csum_err0 = !gtpu_local_csum_is_valid (b0);
1925 if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
1926 csum_err1 = !gtpu_validate_udp_csum (vm, b1);
1928 csum_err1 = !gtpu_local_csum_is_valid (b1);
1930 /* speculatively load gtp header version field */
1931 ver0 = gtpu0->ver_flags;
1932 ver1 = gtpu1->ver_flags;
1935 * Manipulate gtpu header
1936 * TBD: Manipulate Sequence Number and N-PDU Number
1937 * TBD: Manipulate Next Extension Header
/* Fast track: GTPU v1, PT set, reserved bit clear, message type 255
   (G-PDU, i.e. encapsulated user payload). */
1940 ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
1941 (GTPU_V1_VER | GTPU_PT_BIT));
1942 is_fast_track0 = is_fast_track0 & (gtpu0->type == 255);
1945 ((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
1946 (GTPU_V1_VER | GTPU_PT_BIT));
1947 is_fast_track1 = is_fast_track1 & (gtpu1->type == 255);
1949 ext0 = (ver0 & GTPU_E_BIT) ?
1950 (gtpu_ext_header_t *) >pu0->next_ext_type :
1952 ext1 = (ver1 & GTPU_E_BIT) ?
1953 (gtpu_ext_header_t *) >pu1->next_ext_type :
/* Header length: base 8 or 12 bytes depending on E/S/PN bits, plus the
   first extension header (len unit = 4 bytes) when the E bit is set. */
1956 gtpu_hdr_len0 = sizeof (gtpu_header_t) -
1957 (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) +
1959 gtpu_hdr_len1 = sizeof (gtpu_header_t) -
1960 (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4) +
1963 /* Only for clarity, will be optimized away */
1964 ext0 += ext0->len * 4 / sizeof (*ext0);
1965 ext1 += ext1->len * 4 / sizeof (*ext1);
1967 has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
1968 has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
1970 if (ip_err0 || udp_err0 || csum_err0)
1972 next0 = GTPU_INPUT_NEXT_DROP;
1973 error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
1977 /* Diverge the packet paths for 0 and 1 */
1978 if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0)))
1980 /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */
1982 /* GCC will hopefully fix the duplicate compute */
1984 !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
1985 (GTPU_V1_VER | GTPU_PT_BIT)) |
1988 /* The header or size is wrong */
1990 has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1991 next0 = GTPU_INPUT_NEXT_DROP;
1994 /* Correct version and has the space. It can only be unknown
1997 error0 = GTPU_ERROR_UNSUPPORTED_TYPE;
1998 next0 = GTPU_INPUT_NEXT_DROP;
2000 /* The packet is not forwarded */
2004 /* Manipulate packet 0 */
2005 ASSERT (b0->flow_id != 0);
/* flow_id was stamped by the NIC; offset by flow_id_start to recover
   the tunnel pool index. */
2006 tunnel_index0 = b0->flow_id - gtm->flow_id_start;
2007 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
2010 /* Pop gtpu header */
2011 vlib_buffer_advance (b0, gtpu_hdr_len0);
2013 /* assign the next node */
2014 if (PREDICT_FALSE (t0->decap_next_index !=
2015 GTPU_INPUT_NEXT_IP4_INPUT) &&
2016 (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
2018 error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
2019 next0 = GTPU_INPUT_NEXT_DROP;
2022 next0 = t0->decap_next_index;
2024 sw_if_index0 = t0->sw_if_index;
2026 /* Set packet input sw_if_index to unicast GTPU tunnel for learning
2028 vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
2030 pkts_decapsulated++;
2031 stats_n_packets += 1;
2032 stats_n_bytes += len0;
2034 /* Batch stats increment on the same gtpu tunnel so counter
2035 is not incremented per packet */
2036 if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
2038 stats_n_packets -= 1;
2039 stats_n_bytes -= len0;
2040 if (stats_n_packets)
2041 vlib_increment_combined_counter (
2042 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
2043 thread_index, stats_sw_if_index, stats_n_packets,
2045 stats_n_packets = 1;
2046 stats_n_bytes = len0;
2047 stats_sw_if_index = sw_if_index0;
2051 b0->error = error0 ? node->errors[error0] : 0;
2053 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
2056 = vlib_add_trace (vm, node, b0, sizeof (*tr));
2057 tr->next_index = next0;
2059 tr->tunnel_index = tunnel_index0;
2060 tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
2061 if (vlib_buffer_has_space (b0, 4))
2063 tr->header.ver_flags = gtpu0->ver_flags;
2064 tr->header.type = gtpu0->type;
2065 tr->header.length = clib_net_to_host_u16 (gtpu0->length);
2069 if (ip_err1 || udp_err1 || csum_err1)
2071 next1 = GTPU_INPUT_NEXT_DROP;
2072 error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
2077 * Manipulate gtpu header
2078 * TBD: Manipulate Sequence Number and N-PDU Number
2079 * TBD: Manipulate Next Extension Header
2081 if (PREDICT_FALSE ((!is_fast_track1) | (!has_space1)))
2083 /* Not fast path. ext1 and gtpu_hdr_len1 might be wrong */
2085 /* GCC will hopefully fix the duplicate compute */
2087 !((ver1 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
2088 (GTPU_V1_VER | GTPU_PT_BIT)) |
2091 /* The header or size is wrong */
2093 has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
2094 next1 = GTPU_INPUT_NEXT_DROP;
2097 /* Correct version and has the space. It can only be unknown
2100 error1 = GTPU_ERROR_UNSUPPORTED_TYPE;
2101 next1 = GTPU_INPUT_NEXT_DROP;
2103 /* The packet is not forwarded */
2107 /* Manipulate packet 1 */
2108 ASSERT (b1->flow_id != 0);
2109 tunnel_index1 = b1->flow_id - gtm->flow_id_start;
2110 t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
2113 /* Pop gtpu header */
2114 vlib_buffer_advance (b1, gtpu_hdr_len1);
2116 /* assign the next node */
2117 if (PREDICT_FALSE (t1->decap_next_index !=
2118 GTPU_INPUT_NEXT_IP4_INPUT) &&
2119 (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
2121 error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
2122 next1 = GTPU_INPUT_NEXT_DROP;
2125 next1 = t1->decap_next_index;
2127 sw_if_index1 = t1->sw_if_index;
2129 /* Required to make the l2 tag push / pop code work on l2 subifs */
2130 /* This won't happen in current implementation as only
2131 ipv4/udp/gtpu/IPV4 type packets can be matched */
/* NOTE(review): packet 0 has no matching L2 branch above -- asymmetric,
   but dead code either way since only ip4/ip6 decap_next is accepted. */
2132 if (PREDICT_FALSE (next1 == GTPU_INPUT_NEXT_L2_INPUT))
2133 vnet_update_l2_len (b1);
2135 /* Set packet input sw_if_index to unicast GTPU tunnel for learning
2137 vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1;
2139 pkts_decapsulated++;
2140 stats_n_packets += 1;
2141 stats_n_bytes += len1;
2143 /* Batch stats increment on the same gtpu tunnel so counter
2144 is not incremented per packet */
2145 if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
2147 stats_n_packets -= 1;
2148 stats_n_bytes -= len1;
2149 if (stats_n_packets)
2150 vlib_increment_combined_counter (
2151 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
2152 thread_index, stats_sw_if_index, stats_n_packets,
2154 stats_n_packets = 1;
2155 stats_n_bytes = len1;
2156 stats_sw_if_index = sw_if_index1;
2160 b1->error = error1 ? node->errors[error1] : 0;
2162 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
2165 = vlib_add_trace (vm, node, b1, sizeof (*tr));
2166 tr->next_index = next1;
2168 tr->tunnel_index = tunnel_index1;
2169 tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
2170 if (vlib_buffer_has_space (b1, 4))
2172 tr->header.ver_flags = gtpu1->ver_flags;
2173 tr->header.type = gtpu1->type;
2174 tr->header.length = clib_net_to_host_u16 (gtpu1->length);
2178 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
2179 n_left_to_next, bi0, bi1, next0,
/* Single-packet tail loop -- same logic as packet 0 above. */
2183 while (n_left_from > 0 && n_left_to_next > 0)
2188 gtpu_header_t * gtpu0;
2193 u32 sw_if_index0, len0;
2196 gtpu_ext_header_t ext = { .type = 0, .len = 0, .pad = 0 };
2197 gtpu_ext_header_t *ext0;
2198 bool is_fast_track0;
2206 n_left_to_next -= 1;
2208 b0 = vlib_get_buffer (vm, bi0);
2209 len0 = vlib_buffer_length_in_chain (vm, b0);
2214 ip_err0 = gtpu_check_ip (b0, len0);
2215 udp_err0 = gtpu_check_ip_udp_len (b0);
2216 if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
2217 csum_err0 = !gtpu_validate_udp_csum (vm, b0);
2219 csum_err0 = !gtpu_local_csum_is_valid (b0);
2221 /* udp leaves current_data pointing at the gtpu header */
2222 gtpu0 = vlib_buffer_get_current (b0);
2224 /* speculatively load gtp header version field */
2225 ver0 = gtpu0->ver_flags;
2228 * Manipulate gtpu header
2229 * TBD: Manipulate Sequence Number and N-PDU Number
2230 * TBD: Manipulate Next Extension Header
2233 ((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
2234 (GTPU_V1_VER | GTPU_PT_BIT));
2235 is_fast_track0 = is_fast_track0 & (gtpu0->type == 255);
2237 ext0 = (ver0 & GTPU_E_BIT) ?
2238 (gtpu_ext_header_t *) >pu0->next_ext_type :
2241 gtpu_hdr_len0 = sizeof (gtpu_header_t) -
2242 (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4) +
2244 ext0 += ext0->len * 4 / sizeof (*ext0);
2246 has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
2248 if (ip_err0 || udp_err0 || csum_err0)
2250 next0 = GTPU_INPUT_NEXT_DROP;
2251 error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
2255 if (PREDICT_FALSE ((!is_fast_track0) | (!has_space0)))
2257 /* Not fast path. ext0 and gtpu_hdr_len0 might be wrong */
2259 /* GCC will hopefully fix the duplicate compute */
2261 !((ver0 & (GTPU_VER_MASK | GTPU_PT_BIT | GTPU_RES_BIT)) ==
2262 (GTPU_V1_VER | GTPU_PT_BIT)) |
2265 /* The header or size is wrong */
2267 has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
2268 next0 = GTPU_INPUT_NEXT_DROP;
2271 /* Correct version and has the space. It can only be unknown
2274 error0 = GTPU_ERROR_UNSUPPORTED_TYPE;
2275 next0 = GTPU_INPUT_NEXT_DROP;
2277 /* The packet is not forwarded */
2281 ASSERT (b0->flow_id != 0);
2282 tunnel_index0 = b0->flow_id - gtm->flow_id_start;
2283 t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
2286 /* Pop gtpu header */
2287 vlib_buffer_advance (b0, gtpu_hdr_len0);
2289 /* assign the next node */
2290 if (PREDICT_FALSE (t0->decap_next_index !=
2291 GTPU_INPUT_NEXT_IP4_INPUT) &&
2292 (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
2294 error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
2295 next0 = GTPU_INPUT_NEXT_DROP;
2298 next0 = t0->decap_next_index;
2300 sw_if_index0 = t0->sw_if_index;
2302 /* Set packet input sw_if_index to unicast GTPU tunnel for learning
2304 vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
2306 pkts_decapsulated++;
2307 stats_n_packets += 1;
2308 stats_n_bytes += len0;
2310 /* Batch stats increment on the same gtpu tunnel so counter
2311 is not incremented per packet */
2312 if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
2314 stats_n_packets -= 1;
2315 stats_n_bytes -= len0;
2316 if (stats_n_packets)
2317 vlib_increment_combined_counter (
2318 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
2319 thread_index, stats_sw_if_index, stats_n_packets,
2321 stats_n_packets = 1;
2322 stats_n_bytes = len0;
2323 stats_sw_if_index = sw_if_index0;
2326 b0->error = error0 ? node->errors[error0] : 0;
2328 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
2331 = vlib_add_trace (vm, node, b0, sizeof (*tr));
2332 tr->next_index = next0;
2334 tr->tunnel_index = tunnel_index0;
2335 tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
2336 if (vlib_buffer_has_space (b0, 4))
2338 tr->header.ver_flags = gtpu0->ver_flags;
2339 tr->header.type = gtpu0->type;
2340 tr->header.length = clib_net_to_host_u16 (gtpu0->length);
2343 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2344 n_left_to_next, bi0, next0);
2347 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2350 /* Do we still need this now that tunnel tx stats is kept? */
2351 vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
2352 GTPU_ERROR_DECAPSULATED,
2355 /* Increment any remaining batch stats */
2356 if (stats_n_packets)
2358 vlib_increment_combined_counter
2359 (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
2360 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
2361 node->runtime_data[0] = stats_sw_if_index;
2364 return from_frame->n_vectors;
2367 VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
2368 vlib_node_runtime_t * node,
2369 vlib_frame_t * from_frame)
2371 return gtpu_flow_input(vm, node, from_frame);
2376 #ifndef CLIB_MULTIARCH_VARIANT
2377 VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
2378 .name = "gtpu4-flow-input",
2379 .type = VLIB_NODE_TYPE_INTERNAL,
2380 .vector_size = sizeof (u32),
2382 .format_trace = format_gtpu_rx_trace,
2384 .n_errors = GTPU_FLOW_N_ERROR,
2385 .error_strings = gtpu_flow_error_strings,
2387 .n_next_nodes = GTPU_INPUT_N_NEXT,
2389 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
2390 foreach_gtpu_input_next
2398 #endif /* CLIB_MARCH_VARIANT */