/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <gtpu/gtpu.h>
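
/*
 * For reference, the GTP-U v1 header that follows the outer UDP header
 * (3GPP TS 29.281, section 5.1):
 *
 *   byte 0      version (3 bits), PT, reserved, E, S, PN flags
 *   byte 1      message type (0xff = G-PDU, i.e. encapsulated user data)
 *   bytes 2-3   length of everything after the mandatory 8-byte header
 *   bytes 4-7   TEID identifying the tunnel endpoint
 *   bytes 8-11  optional sequence number / N-PDU number / next extension
 *               header type, present only when E, S or PN is set
 *
 * gtpu_header_t in <gtpu/gtpu.h> models the full 12-byte form; the decap
 * paths below subtract the optional 4 bytes when none of the flags is set.
 */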
extern vlib_node_registration_t gtpu4_input_node;
extern vlib_node_registration_t gtpu6_input_node;

static u8 *
format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
}
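
/*
 * gtpu_input is the shared decap worker for the gtpu4-input and
 * gtpu6-input graph nodes: it looks up the tunnel by outer source
 * address + TEID, validates the encap FIB and destination address,
 * strips the GTPU header and forwards the inner packet to the tunnel's
 * decap next node, batching per-interface rx counters along the way.
 * The dual- and single-packet loops below differ only in how many
 * buffers they handle per iteration.
 */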
always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
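
  /* Consecutive packets often belong to the same tunnel, so the most
     recent (SIP, TEID) -> tunnel-index mapping is cached in last_key4 /
     last_key6 and last_tunnel_index to skip the hash lookup on a hit.
     The all-ones initializers above guarantee the first packet never
     matches a stale key. */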
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
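
  /* Rx counters are batched: stats_n_packets / stats_n_bytes accumulate
     over a run of packets on the same sw_if_index and are flushed in one
     vlib_increment_combined_counter call when the interface changes or
     the frame ends; runtime_data[0] carries the last interface across
     frames. */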
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u32 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
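
          /* sizeof(gtpu_header_t) includes the optional 4 bytes of
             sequence number, N-PDU number and next-extension-header
             fields; they are only on the wire when one of the E, S or
             PN flags is set. E.g. ver_flags 0x30 (v1, PT=1, no flags)
             gives an 8-byte header, 0x32 (S set) the full 12 bytes. */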
          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u32 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4)
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          else
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists for the packet's DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists for the packet's SIP and TEID:
             * the SIP identifies a GTPU path, and the TEID identifies a
             * tunnel within that path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel rx stats are kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index:gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
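      /* Remember which interface the open batch ended on so the next
         frame dispatched on this thread can resume batching without an
         extra flush. */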
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input (vm, node, from_frame, /* is_ip4 */ 0);
}
static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};
VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};
VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};
typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;
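
/*
 * The ip4-gtpu-bypass / ip6-gtpu-bypass nodes sit on the ip4-unicast /
 * ip6-unicast feature arcs (enabled per interface, e.g. via the plugin's
 * "set interface ip gtpu-bypass" CLI, assuming it is registered elsewhere
 * in this plugin). They peek at locally terminated UDP packets and, when
 * the destination port is the GTPU port and the destination address is a
 * known local VTEP, validate UDP length/checksum and hand the packet
 * straight to gtpu4-input / gtpu6-input, skipping the full ip-local path.
 */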
always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;	/* last IPv4 address / fib index
				   matching a local VTEP address */
  vtep6_key_t last_vtep6;	/* last IPv6 address / fib index
				   matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
#ifdef CLIB_HAVE_VEC512
  vtep4_cache_t vtep4_u512;
  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
#endif

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);
          vnet_feature_next(&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1; /* no local VTEP for GTPU packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = b[0];
          b++;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
#define foreach_gtpu_flow_error					\
  _(NONE, "no error")						\
  _(PAYLOAD_ERROR, "Payload type errors")			\
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")			\
  _(IP_HEADER_ERROR, "Rx ip header errors")			\
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")		\
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;
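
/* The flow-input path below serves tunnels offloaded to NIC flow rules:
   the driver has already matched outer IP/UDP/TEID in hardware and
   stamped the buffer's flow_id, so the node only sanity-checks the outer
   headers and maps flow_id to a tunnel pool index. The enum above folds
   the flow-specific errors and the common gtpu_error.def entries into one
   error space so node error indices stay aligned. */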
static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};

#define gtpu_local_need_csum_check(_b) \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED \
     || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))

#define gtpu_local_csum_is_valid(_b) \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT \
    || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) != 0)
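
/* These helpers lean on checksum offload metadata: a packet needs a
   software checksum walk only when neither an L4-checksum-computed flag
   nor a UDP-checksum-offload flag is present on the buffer; otherwise
   the offload verdict is trusted as-is. */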
static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t) };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
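
/* gtpu_check_ip sanity-checks the outer IPv4 header against the buffer:
   a total length larger than the bytes actually present, an expired TTL,
   or anything but a plain 20-byte IPv4 header (version/IHL != 0x45)
   marks the packet bad. The flow path only matches IPv4 outers, so an
   ip4_header_t view is safe here. */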
static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0
      || ip4_hdr->ip_version_and_header_length != 0x45;
}
static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}
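
/* gtpu_err_code maps the three check results onto one error code; when
   several fail at once the later assignment wins, so UDP checksum beats
   UDP length, which beats IP header. */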
static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}
static_always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u32 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;
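
          /* The NIC flow rule installed at tunnel-offload time stamps
             matching packets with flow_id = flow_id_start + tunnel index,
             so subtracting flow_id_start recovers the tunnel directly and
             no hash lookup is needed on this path; flow_id is cleared so
             it is not misread downstream. */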
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
              goto trace1;
            }

          /* speculatively load gtp header version field */
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          ASSERT (b1->flow_id != 0);
          tunnel_index1 = b1->flow_id - gtm->flow_id_start;
          t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
          b1->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          /* assign the next node */
          if (PREDICT_FALSE ((t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }
          next1 = t1->decap_next_index;

          sw_if_index1 = t1->sw_if_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          /* This won't happen in the current implementation, since only
             ipv4/udp/gtpu/IPv4-type packets can be matched */
          if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u32 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel rx stats are kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input (vm, node, from_frame);
}

#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};
#endif /* CLIB_MARCH_VARIANT */