/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <gtpu/gtpu.h>
extern vlib_node_registration_t gtpu4_input_node;
extern vlib_node_registration_t gtpu6_input_node;
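/*
 * Editor's note on the wire format (3GPP TS 29.281): a GTPU v1 header is
 * 8 mandatory bytes -- ver_flags, message type, length, TEID -- optionally
 * followed by 4 more bytes (sequence number, N-PDU number, next extension
 * header type) that are present whenever any of the E, S or PN flag bits
 * is set.  gtpu_header_t covers the full 12 bytes; the decap loops below
 * subtract 4 when the optional part is absent on the wire.
 */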
typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}
always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  u32 fib_index, sw_if_index;

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  if (is_ip4)
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~0) ?
      vec_elt (ip4_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];
  else
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~0) ?
      vec_elt (ip6_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];

  return (fib_index == t->encap_fib_index);
}
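/*
 * Illustrative example (editor's note): a packet arriving on an interface
 * bound to VRF 0 only matches a tunnel created with encap FIB 0.  If an
 * earlier feature already stored an explicit FIB index in
 * sw_if_index[VLIB_TX] (i.e. it is not ~0), that index is compared
 * instead of the RX interface's FIB.
 */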
always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
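  /*
   * Editor's sketch of the single-entry lookup cache used below:
   * last_key4 / last_key6 remember the most recent (SIP, TEID) hash key
   * and last_tunnel_index the tunnel it resolved to, so back-to-back
   * packets of the same flow skip the hash probe entirely.  Initializing
   * the keys to all-ones effectively marks the cache as empty.
   */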
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u32 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
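          /* Worked example (editor's note): sizeof(gtpu_header_t) is 12,
           * covering the 8 mandatory bytes plus the 4 optional
           * seq/N-PDU/next-ext bytes.  When none of the E, S or PN flags
           * is set, those 4 optional bytes are absent on the wire:
           *
           *   ver0 = 0x30 (v1, PT=1, E=S=PN=0) -> hdr_len = 12 - 4 = 8
           *   ver0 = 0x32 (v1, PT=1, S=1)      -> hdr_len = 12 - 0 = 12
           */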
          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure GTPU tunnel exists according to packet SIP and teid.
             * SIP identifies a GTPU path; teid identifies a tunnel in a given GTPU path. */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure mcast GTPU tunnel exists by packet DIP and teid */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure GTPU tunnel exists according to packet SIP and teid.
             * SIP identifies a GTPU path; teid identifies a tunnel in a given GTPU path. */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure GTPU tunnel exists according to packet SIP and teid.
             * SIP identifies a GTPU path; teid identifies a tunnel in a given GTPU path. */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure mcast GTPU tunnel exists by packet DIP and teid */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure GTPU tunnel exists according to packet SIP and teid.
             * SIP identifies a GTPU path; teid identifies a tunnel in a given GTPU path. */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
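      /* Editor's note: the vlib_validate_buffer_enqueue_x2() call above
       * finishes a speculative enqueue -- both buffer indices were written
       * to the current next frame up front, and the call repairs the frame
       * only when next0 or next1 differs from the speculated next_index. */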
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u32 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4)
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          else
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure GTPU tunnel exists according to packet SIP and teid.
             * SIP identifies a GTPU path; teid identifies a tunnel in a given GTPU path. */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure mcast GTPU tunnel exists by packet DIP and teid */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure GTPU tunnel exists according to packet SIP and teid.
             * SIP identifies a GTPU path; teid identifies a tunnel in a given GTPU path. */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index:gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input (vm, node, from_frame, /* is_ip4 */ 0);
}
static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};
VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};
VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};
typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;
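/*
 * The bypass nodes below let GTPU packets shortcut the full ip4/ip6
 * lookup: run as an interface input feature, they peek at UDP dst port
 * 2152 (UDP_DST_PORT_GTPU), check the destination against local VTEP
 * addresses, and hand matching packets straight to gtpu4-input /
 * gtpu6-input.  Typical usage (editor's note, assuming the stock gtpu
 * plugin CLI):
 *
 *   set interface ip gtpu-bypass <interface> [del]
 *   set interface ip6 gtpu-bypass <interface> [del]
 */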
always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    addr4.data_u32 = ~0;
  else
    ip6_address_set_zero (&addr6);
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for GTPU packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for GTPU packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current at GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for GTPU packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for GTPU packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current at GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip40->dst_address.as_u32))
                    goto exit; /* no local VTEP for GTPU packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip60->dst_address))
                    goto exit; /* no local VTEP for GTPU packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current at GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
#define foreach_gtpu_flow_error                                 \
  _(NONE, "no error")                                           \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")                 \
  _(IP_HEADER_ERROR, "Rx ip header errors")                     \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")               \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;

static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};
#define gtpu_local_need_csum_check(_b)                          \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED             \
     || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))

#define gtpu_local_csum_is_valid(_b)                            \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT               \
    || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) != 0)
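/*
 * Editor's note on the two macros above: a buffer needs an explicit
 * software checksum check only when the driver neither computed the L4
 * checksum (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED unset) nor promised to
 * offload it (VNET_BUFFER_F_OFFLOAD_UDP_CKSUM unset).  A checksum is
 * treated as valid when it was verified correct, or when its
 * verification is being offloaded for this buffer.
 */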
static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t) };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
    sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0
    || ip4_hdr->ip_version_and_header_length != 0x45;
}
static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
    sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}
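/*
 * Editor's note on the two checks above: gtpu_check_ip() flags a packet
 * whose outer IPv4 header claims more bytes than the buffer actually
 * holds (ip_len > payload_len + 20 + 8, with payload_len measured from
 * the GTPU header), whose ttl is 0, or whose first byte is not a plain
 * 20-byte IPv4 header (0x45).  gtpu_check_ip_udp_len() flags a UDP
 * length exceeding the enclosing IP length.  Both return non-zero on
 * error, which gtpu_err_code() below maps to a drop reason.
 */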
static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}
static_always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u32 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
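          /* Editor's note: on this path the tunnel was already matched in
           * hardware -- the driver's flow offload marks the buffer with
           * flow_id = flow_id_start + tunnel pool index, so decap resolves
           * the tunnel with a plain subtraction instead of a hash probe
           * (the gtpu_check_* helpers above still validate the headers). */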
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          next0 = GTPU_INPUT_NEXT_IP4_INPUT;
          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
              goto trace1;
            }

          /* speculatively load gtp header version field */
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          ASSERT (b1->flow_id != 0);
          tunnel_index1 = b1->flow_id - gtm->flow_id_start;
          t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          /* assign the next node */
          next1 = GTPU_INPUT_NEXT_IP4_INPUT;
          sw_if_index1 = t1->sw_if_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          /* This won't happen in current implementation as only
             ipv4/udp/gtpu/IPV4 type packets can be matched */
          if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u32 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          next0 = GTPU_INPUT_NEXT_IP4_INPUT;
          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input (vm, node, from_frame);
}
#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};
#endif /* CLIB_MARCH_VARIANT */