/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <gtpu/gtpu.h>

extern vlib_node_registration_t gtpu4_input_node;
extern vlib_node_registration_t gtpu6_input_node;
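/*
 * For reference, the GTP-U v1 header this file parses (3GPP TS 29.281).
 * The fixed part is 8 bytes; if any of the E, S or PN flags is set, the
 * optional 4-byte block is present as well, which is how the
 * gtpu_hdr_len computations below arrive at 8 vs. 12 bytes:
 *
 *     0                   1                   2                   3
 *     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    | Ver |P|*|E|S|N| Message Type  |            Length             |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |              Tunnel Endpoint Identifier (TEID)                |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |     Sequence Number (opt)     | N-PDU Nr (opt)| Next Ext (opt)|
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */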
static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}
always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
}
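/*
 * The encap-fib check ties a tunnel to the VRF it was created in: a
 * packet only matches if it arrived in the same IP FIB as the tunnel's
 * encap FIB, so identical src/TEID keys in different VRFs stay apart.
 */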
always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
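  /*
   * last_key4/last_key6 together with last_tunnel_index act as a
   * one-entry lookup cache: back-to-back packets from the same SIP/TEID
   * skip the hash lookup. The all-ones initialization guarantees the
   * first packet can never produce a false cache hit.
   */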
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4)
            {
              ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
              ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
            }
          else
            {
              ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
              ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
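          /*
           * GTPU_E_S_PN_BIT masks the E, S and PN flags together, so the
           * expressions above size the header without a branch: e.g.
           * ver_flags 0x30 (v1, PT set, no optional flags) gives the
           * 8-byte minimum, while 0x32 (S flag set) keeps the full
           * 12-byte sizeof (gtpu_header_t).
           */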
          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID.
             * The SIP identifies a GTPU path; the TEID identifies a tunnel
             * within that path. */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID.
             * The SIP identifies a GTPU path; the TEID identifies a tunnel
             * within that path. */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }
        next0:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID.
             * The SIP identifies a GTPU path; the TEID identifies a tunnel
             * within that path. */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID.
             * The SIP identifies a GTPU path; the TEID identifies a tunnel
             * within that path. */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }
        next1:
          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          uword * p0;
          u32 sw_if_index0, len0;
          u8 has_space0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4)
            ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
          else
            ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));

          tunnel_index0 = ~0;
          error0 = 0;

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);

          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID.
             * The SIP identifies a GTPU path; the TEID identifies a tunnel
             * within that path. */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure a mcast GTPU tunnel exists for the packet DIP and TEID */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure a GTPU tunnel exists for the packet SIP and TEID.
             * The SIP identifies a GTPU path; the TEID identifies a tunnel
             * within that path. */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }
        next00:
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4?
                               gtpu4_input_node.index:gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }
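  /*
   * stats_sw_if_index is saved back into runtime_data[0] so the stats
   * batch can resume on the next frame dispatch without forcing a
   * counter flush per frame.
   */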
  return from_frame->n_vectors;
}
VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
}
static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _
};
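/*
 * gtpu_error.def is an X-macro list: each gtpu_error (NAME, "string")
 * entry expands here to just its string, and in the plugin's headers to
 * the matching GTPU_ERROR_NAME enum value, so the strings and the enum
 * are generated from one list and cannot drift apart.
 */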
VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};
VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};
typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;
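/*
 * The bypass nodes below are intended to hang off the IP input feature
 * arc: they recognize locally terminated GTP-U/UDP packets early and
 * hand them straight to gtpu4-input / gtpu6-input, while everything
 * else falls through to the next IP feature via vnet_feature_next().
 */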
always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;	/* last IPv4 address / fib index
				   matching a local VTEP address */
  vtep6_key_t last_vtep6;	/* last IPv6 address / fib index
				   matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
#ifdef CLIB_HAVE_VEC512
  vtep4_cache_t vtep4_u512;
  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
#endif

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    vtep4_key_init (&last_vtep4);
  else
    vtep6_key_init (&last_vtep6);
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = b[0];
          b1 = b[1];
          b += 2;
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);
          vnet_feature_next(&next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit0; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit0; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;
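          /*
           * Per RFC 768 a zero checksum on UDP over IPv4 means "not
           * computed", so such packets are passed without software
           * verification (the same shortcut is applied on the IPv6 path
           * here).
           */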
          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
                goto exit1; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
                goto exit1; /* no local VTEP for GTPU packet */
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = b[0];
          b += 1;
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(&next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
#ifdef CLIB_HAVE_VEC512
              if (!vtep4_check_vector
                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
              if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
                goto exit; /* no local VTEP for GTPU packet */
            }
          else
            {
              if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
                goto exit; /* no local VTEP for GTPU packet */
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current_data at the GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
#define foreach_gtpu_flow_error					\
  _(NONE, "no error")						\
  _(PAYLOAD_ERROR, "Payload type errors")			\
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")			\
  _(IP_HEADER_ERROR, "Rx ip header errors")			\
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")		\
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) GTPU_FLOW_ERROR_##f,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
  GTPU_FLOW_N_ERROR,
} gtpu_flow_error_t;

static char *gtpu_flow_error_strings[] = {
#define _(n,s) s,
  foreach_gtpu_flow_error
#undef _
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _
};
#define gtpu_local_need_csum_check(_b)				\
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED ||		\
     (_b->flags & VNET_BUFFER_F_OFFLOAD &&			\
      vnet_buffer2 (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)))

#define gtpu_local_csum_is_valid(_b)				\
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT ||		\
    (_b->flags & VNET_BUFFER_F_OFFLOAD &&			\
     vnet_buffer2 (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)) != 0)
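/*
 * Reading of the two macros above: a software checksum pass is needed
 * only when the stack has neither already computed the L4 checksum nor
 * flagged the buffer for hardware UDP checksum offload; an offloaded
 * checksum is treated as valid.
 */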
static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
{
  u32 flags = b->flags;
  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t) };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
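/*
 * ip4_tcp_udp_validate_checksum() expects current_data at the IP
 * header, but the flow path is already parked on the GTPU header, so
 * the buffer is temporarily rewound by the IPv4+UDP header size and
 * restored afterwards.
 */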
static_always_inline u8
gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
  return ip_len > expected || ip4_hdr->ttl == 0
      || ip4_hdr->ip_version_and_header_length != 0x45;
}
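/*
 * Non-zero return flags a bad packet: the declared IPv4 total length
 * may not exceed the bytes actually received, the TTL must be non-zero,
 * and 0x45 pins the header to plain IPv4 with no options, the only
 * shape the flow path is prepared to parse.
 */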
static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t *b)
{
  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
      sizeof(ip4_header_t) - sizeof(udp_header_t);
  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}
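/*
 * A UDP datagram can never be longer than the IP packet carrying it;
 * udp_len > ip_len therefore marks the packet as malformed, and it is
 * dropped with a UDP length error downstream.
 */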
static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}
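/*
 * The assignments above run in sequence, so when several checks fail at
 * once the last write wins: a checksum error outranks a UDP length
 * error, which outranks an IP header error; exactly one error code is
 * reported per packet.
 */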
always_inline uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          u8 ver0, ver1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          ip_err1 = gtpu_check_ip (b1, len1);
          udp_err1 = gtpu_check_ip_udp_len (b1);

          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
            csum_err1 = !gtpu_validate_udp_csum (vm, b1);
          else
            csum_err1 = !gtpu_local_csum_is_valid (b1);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace0;
            }

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
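          /*
           * Flow-offload fast path: packets steered here by a hardware
           * flow rule carry a flow_id that maps directly onto the tunnel
           * pool index, so no SIP/TEID hash lookup is needed.
           */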
          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }

          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
              goto trace1;
            }

          /* speculatively load gtp header version field */
          ver1 = gtpu1->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
          if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
            {
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          ASSERT (b1->flow_id != 0);
          tunnel_index1 = b1->flow_id - gtm->flow_id_start;
          t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          /* assign the next node */
          if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }
          next1 = t1->decap_next_index;

          sw_if_index1 = t1->sw_if_index;

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          /* This won't happen in current implementation as only
             ipv4/udp/gtpu/IPV4 type packets can be matched */
          if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0;
          u32 error0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0;
          u32 sw_if_index0, len0;
          u8 has_space0 = 0;
          u8 ver0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          len0 = vlib_buffer_length_in_chain (vm, b0);

          tunnel_index0 = ~0;
          error0 = 0;

          ip_err0 = gtpu_check_ip (b0, len0);
          udp_err0 = gtpu_check_ip_udp_len (b0);
          if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
            csum_err0 = !gtpu_validate_udp_csum (vm, b0);
          else
            csum_err0 = !gtpu_local_csum_is_valid (b0);

          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
              goto trace00;
            }

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);

          /* speculatively load gtp header version field */
          ver0 = gtpu0->ver_flags;

          /*
           * Manipulate gtpu header
           * TBD: Manipulate Sequence Number and N-PDU Number
           * TBD: Manipulate Next Extension Header
           */
          gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);

          has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
          if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
            {
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          ASSERT (b0->flow_id != 0);
          tunnel_index0 = b0->flow_id - gtm->flow_id_start;
          t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          /* assign the next node */
          if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
              (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
            {
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }
          next0 = t0->decap_next_index;

          sw_if_index0 = t0->sw_if_index;

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return gtpu_flow_input(vm, node, from_frame);
}
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_gtpu_rx_trace,

  .n_errors = GTPU_FLOW_N_ERROR,
  .error_strings = gtpu_flow_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};
#endif /* CLIB_MULTIARCH_VARIANT */