/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <gtpu/gtpu.h>
extern vlib_node_registration_t gtpu4_input_node;
extern vlib_node_registration_t gtpu6_input_node;

typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
}
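
/* Tunnels are keyed on (source IP, TEID) alone, so a packet could match a
 * tunnel while arriving in the wrong VRF.  Comparing the tunnel's encap FIB
 * index against the FIB the packet was actually received in rejects such
 * cross-VRF hits before any decap work is done. */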

always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
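
  /* last_key4/last_key6 plus last_tunnel_index form a one-entry lookup
   * cache: bursts of packets on the same (SIP, TEID) skip the hash table
   * probe entirely.  Initializing the keys to all-ones guarantees the
   * first packet can never produce a false cache hit. */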

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
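
          /* A sketch of the GTPU v1 header layout behind this length math
           * (gtpu_header_t is defined in gtpu.h; fields per 3GPP TS 29.281):
           *
           *   u8  ver_flags;      version (3 bits) + PT, E, S, PN flags
           *   u8  type;
           *   u16 length;
           *   u32 teid;           <- end of the 8-byte mandatory part
           *   u16 sequence;
           *   u8  pdu_number;
           *   u8  next_ext_type;  <- 4-byte optional part
           *
           * When none of the E, S or PN flags is set, the optional 4 bytes
           * are absent on the wire, hence the "- 4" above; if any one of
           * the three is set, all 4 optional bytes are present. */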

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4)
            {
              key4_0.src = ip4_0->src_address.as_u32;
              key4_0.teid = gtpu0->teid;

              /* Make sure a GTPU tunnel exists for the packet's SIP and TEID;
               * the SIP identifies a GTPU path and the TEID identifies a
               * tunnel within that path */
              if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
                {
                  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                  if (PREDICT_FALSE (p0 == NULL))
                    {
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
                      goto trace0;
                    }
                  last_key4.as_u64 = key4_0.as_u64;
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
              t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

              /* Validate GTPU tunnel encap-fib index against packet */
              if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
                {
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
                  goto trace0;
                }

              /* Validate GTPU tunnel SIP against packet DIP */
              if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
                goto next0; /* valid packet */
              if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
                {
                  key4_0.src = ip4_0->dst_address.as_u32;
                  key4_0.teid = gtpu0->teid;
                  /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                  if (PREDICT_TRUE (p0 != NULL))
                    {
                      mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                      goto next0; /* valid packet */
                    }
                }
              error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
          } else /* !is_ip4 */ {
              key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
              key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
              key6_0.teid = gtpu0->teid;

              /* Make sure a GTPU tunnel exists for the packet's SIP and TEID;
               * the SIP identifies a GTPU path and the TEID identifies a
               * tunnel within that path */
              if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
                {
                  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                  if (PREDICT_FALSE (p0 == NULL))
                    {
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
                      goto trace0;
                    }
                  clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
              t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

              /* Validate GTPU tunnel encap-fib index against packet */
              if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
                {
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
                  goto trace0;
                }

              /* Validate GTPU tunnel SIP against packet DIP */
              if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                      &t0->src.ip6)))
                goto next0; /* valid packet */
              if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
                {
                  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                  key6_0.teid = gtpu0->teid;
                  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                  if (PREDICT_TRUE (p0 != NULL))
                    {
                      mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                      goto next0; /* valid packet */
                    }
                }
              error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
          }

        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
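
          /* Downstream nodes always see the unicast tunnel as the RX
           * interface (that is what L2 learning needs); only the stats
           * attribution below switches to the mcast tunnel when the packet
           * matched a multicast DIP above. */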

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
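
          /* Worked example of the batching: three packets for tunnel A and
           * then one for tunnel B arrive in a frame.  The A packets only
           * bump the local accumulators; the B packet flushes a single
           * combined-counter update of (3 packets, A bytes) for A and
           * restarts the accumulators at (1 packet, len of B) for B. */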

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4)
            {
              key4_1.src = ip4_1->src_address.as_u32;
              key4_1.teid = gtpu1->teid;

              /* Make sure a GTPU tunnel exists for the packet's SIP and TEID;
               * the SIP identifies a GTPU path and the TEID identifies a
               * tunnel within that path */
              if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
                {
                  p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                  if (PREDICT_FALSE (p1 == NULL))
                    {
                      error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next1 = GTPU_INPUT_NEXT_DROP;
                      goto trace1;
                    }
                  last_key4.as_u64 = key4_1.as_u64;
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
              t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

              /* Validate GTPU tunnel encap-fib index against packet */
              if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
                {
                  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next1 = GTPU_INPUT_NEXT_DROP;
                  goto trace1;
                }

              /* Validate GTPU tunnel SIP against packet DIP */
              if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
                goto next1; /* valid packet */
              if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
                {
                  key4_1.src = ip4_1->dst_address.as_u32;
                  key4_1.teid = gtpu1->teid;
                  /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                  p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                  if (PREDICT_TRUE (p1 != NULL))
                    {
                      mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                      goto next1; /* valid packet */
                    }
                }
              error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
          } else /* !is_ip4 */ {
              key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
              key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
              key6_1.teid = gtpu1->teid;

              /* Make sure a GTPU tunnel exists for the packet's SIP and TEID;
               * the SIP identifies a GTPU path and the TEID identifies a
               * tunnel within that path */
              if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
                {
                  p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                  if (PREDICT_FALSE (p1 == NULL))
                    {
                      error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next1 = GTPU_INPUT_NEXT_DROP;
                      goto trace1;
                    }
                  clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
              t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

              /* Validate GTPU tunnel encap-fib index against packet */
              if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
                {
                  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next1 = GTPU_INPUT_NEXT_DROP;
                  goto trace1;
                }

              /* Validate GTPU tunnel SIP against packet DIP */
              if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                      &t1->src.ip6)))
                goto next1; /* valid packet */
              if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
                {
                  key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                  key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                  key6_1.teid = gtpu1->teid;
                  p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                  if (PREDICT_TRUE (p1 != NULL))
                    {
                      mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                      goto next1; /* valid packet */
                    }
                }
              error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
          }

        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
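
          /* The call above completes the speculative enqueue: both buffers
           * were already written into the frame for the cached next_index,
           * and vlib_validate_buffer_enqueue_x2 moves them to the correct
           * frames only when next0/next1 turn out to differ from it. */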
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4)
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          else
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4)
            {
              key4_0.src = ip4_0->src_address.as_u32;
              key4_0.teid = gtpu0->teid;

              /* Make sure a GTPU tunnel exists for the packet's SIP and TEID;
               * the SIP identifies a GTPU path and the TEID identifies a
               * tunnel within that path */
              if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
                {
                  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                  if (PREDICT_FALSE (p0 == NULL))
                    {
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
                      goto trace00;
                    }
                  last_key4.as_u64 = key4_0.as_u64;
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
              t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

              /* Validate GTPU tunnel encap-fib index against packet */
              if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
                {
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
                  goto trace00;
                }

              /* Validate GTPU tunnel SIP against packet DIP */
              if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
                goto next00; /* valid packet */
              if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
                {
                  key4_0.src = ip4_0->dst_address.as_u32;
                  key4_0.teid = gtpu0->teid;
                  /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                  if (PREDICT_TRUE (p0 != NULL))
                    {
                      mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                      goto next00; /* valid packet */
                    }
                }
              error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
          } else /* !is_ip4 */ {
              key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
              key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
              key6_0.teid = gtpu0->teid;

              /* Make sure a GTPU tunnel exists for the packet's SIP and TEID;
               * the SIP identifies a GTPU path and the TEID identifies a
               * tunnel within that path */
              if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
                {
                  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                  if (PREDICT_FALSE (p0 == NULL))
                    {
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
                      goto trace00;
                    }
                  clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
              t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

              /* Validate GTPU tunnel encap-fib index against packet */
              if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
                {
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
                  goto trace00;
                }

              /* Validate GTPU tunnel SIP against packet DIP */
              if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                      &t0->src.ip6)))
                goto next00; /* valid packet */
              if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
                {
                  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                  key6_0.teid = gtpu0->teid;
                  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                  if (PREDICT_TRUE (p0 != NULL))
                    {
                      mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                      goto next00; /* valid packet */
                    }
                }
              error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
          }

        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index:gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
}
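
/* For reference, a decap tunnel feeding these nodes is typically created
 * from the CLI along these lines (illustrative sketch; the exact option set
 * belongs to the "create gtpu tunnel" command in gtpu.c):
 *
 *   create gtpu tunnel src 10.0.0.1 dst 10.0.0.2 teid 123
 *
 * after which GTPU packets arriving for 10.0.0.2 with TEID 123 are
 * decapsulated by gtpu4-input above. */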

static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;
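
/* The bypass nodes below hang off the ip4-unicast / ip6-unicast feature
 * arcs.  They let GTPU packets addressed to a local VTEP jump from IP input
 * straight to gtpu4-input / gtpu6-input, skipping the regular IP lookup and
 * local delivery path; all other packets fall through to the next feature
 * unchanged. */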

always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;   /* last IPv4 address / fib index
                               matching a local VTEP address */
  vtep6_key_t last_vtep6;   /* last IPv6 address / fib index
                               matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = *b++;
          b1 = *b++;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);
          vnet_feature_next(&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
                                       &gtm->vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;
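
          /* A zero UDP checksum over IPv4 means "no checksum computed"
           * (RFC 768), so such packets are accepted without verification.
           * IPv6 nominally forbids a zero UDP checksum; this node applies
           * the same shortcut to both address families. */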

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector (&gtm->vtep_table, b1, ip41, &last_vtep4,
                                       &gtm->vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1; /* no local VTEP for GTPU packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = *b++;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
                                       &gtm->vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
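
/* The bypass feature is then enabled per interface from the CLI, along the
 * lines of (illustrative; the actual commands are registered in gtpu.c):
 *
 *   set interface ip gtpu-bypass GigabitEthernet3/0/0
 *   set interface ip6 gtpu-bypass GigabitEthernet3/0/0 del
 */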

VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */

#define foreach_gtpu_flow_error \
  _(NONE, "no error") \
  _(PAYLOAD_ERROR, "Payload type errors") \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
  _(IP_HEADER_ERROR, "Rx ip header errors") \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;

static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};

#define gtpu_local_need_csum_check(_b) \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED || \
     (_b->flags & VNET_BUFFER_F_OFFLOAD && \
      vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)))

#define gtpu_local_csum_is_valid(_b) \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT || \
    (_b->flags & VNET_BUFFER_F_OFFLOAD && \
     vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)) != 0)
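
/* Reading the two macros: a software checksum pass is needed only when
 * neither an already-computed L4 checksum nor a pending UDP checksum
 * offload is present on the buffer; conversely, a checksum counts as valid
 * when it was verified correct or when it is being offloaded, in which
 * case it is trusted by construction. */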

static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t) };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}

static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0
      || ip4_hdr->ip_version_and_header_length != 0x45;
}

static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}
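
/* These helpers re-validate the IP and UDP headers because flow-offloaded
 * packets are steered to gtpu4-flow-input directly by the NIC and may not
 * have passed through ip4-input's checks; a nonzero return marks the packet
 * as bad and maps to an error code in gtpu_err_code() below. */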

static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}

always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;
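
          /* With hardware flow offload the NIC stamps each matching packet
           * with the flow_id programmed when the tunnel's flow was enabled;
           * flow ids are allocated contiguously from gtm->flow_id_start, so
           * subtracting the base recovers the tunnel pool index without any
           * hash lookup. */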

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
              goto trace1;
            }

          /* speculatively load gtp header version field */
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          ASSERT (b1->flow_id != 0);
          tunnel_index1 = b1->flow_id - gtm->flow_id_start;
          t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
          b1->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          /* assign the next node */
          if (PREDICT_FALSE ((t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }
          next1 = t1->decap_next_index;

          sw_if_index1 = t1->sw_if_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          /* This won't happen in the current implementation, as only
             ipv4/udp/gtpu/ipv4 packets can be matched */
          if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
          b0->flow_id = 0;

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
                             (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increments on the same gtpu tunnel so the counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input(vm, node, from_frame);
}

#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};
#endif /* CLIB_MARCH_VARIANT */