/*
 * decap.c - decapsulate VXLAN GPE
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>

vlib_node_registration_t vxlan_gpe_input_node;
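
/**
 * Per-packet trace record for the decap path; tunnel_index is ~0 when no
 * tunnel matched the packet's key (see format_vxlan_gpe_rx_trace below).
 */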
typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
} vxlan_gpe_rx_trace_t;

static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
                  t->next_index, t->error);
    }
  else
    {
      s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
                  t->error);
    }
  return s;
}

static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);

  /* Buffer formatter registered as .format_buffer below; it currently
     returns s unchanged. */
  return s;
}
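
/**
 * Shared IPv4/IPv6 VXLAN-GPE decap worker.
 *
 * Walks the frame two packets at a time, with a single-packet loop for the
 * remainder. Each packet is rewound to its outer header, the (ip, udp,
 * vxlan-gpe) stack is popped, the tunnel is looked up by (local, remote,
 * vni), and the packet is dispatched to the next node selected by the
 * tunnel. A one-entry key cache (last_key4/last_key6) short-circuits the
 * hash lookup for runs of packets on the same tunnel, and RX counters are
 * batched per sw_if_index for the same reason.
 */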
always_inline uword
vxlan_gpe_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame,
                 u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  vnet_main_t * vnm = ngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  vxlan4_gpe_tunnel_key_t last_key4;
  vxlan6_gpe_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 cpu_index = os_get_cpu_number ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
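
  /* All-ones is not a usable tunnel key, so the first packet always takes
     the hash-lookup path rather than the one-entry cache. */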
  memset (&last_key4, 0xff, sizeof (last_key4));
  memset (&last_key6, 0xff, sizeof (last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
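
  /* The sw_if_index of the in-progress stats batch survives across frames
     in runtime_data[0]; it is written back before returning. */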
  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_vxlan_gpe_header_t * iuvn4_0, * iuvn4_1;
          ip6_vxlan_gpe_header_t * iuvn6_0, * iuvn6_1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_gpe_tunnel_t * t0, * t1;
          vxlan4_gpe_tunnel_key_t key4_0, key4_1;
          vxlan6_gpe_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
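
          /* Speculatively enqueue b0 and b1 to the current next frame;
             vlib_validate_buffer_enqueue_x2 at the bottom of the loop
             repairs the frame if next0/next1 turn out to differ. */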
          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          if (is_ip4)
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (
                  b0, -(word) (sizeof (udp_header_t) + sizeof (ip4_header_t)));
              vlib_buffer_advance (
                  b1, -(word) (sizeof (udp_header_t) + sizeof (ip4_header_t)));

              iuvn4_0 = vlib_buffer_get_current (b0);
              iuvn4_1 = vlib_buffer_get_current (b1);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn4_0));
              vlib_buffer_advance (b1, sizeof (*iuvn4_1));
            }
          else
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (
                  b0, -(word) (sizeof (udp_header_t) + sizeof (ip6_header_t)));
              vlib_buffer_advance (
                  b1, -(word) (sizeof (udp_header_t) + sizeof (ip6_header_t)));

              iuvn6_0 = vlib_buffer_get_current (b0);
              iuvn6_1 = vlib_buffer_get_current (b1);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn6_0));
              vlib_buffer_advance (b1, sizeof (*iuvn6_1));
            }

          tunnel_index0 = ~0;
          tunnel_index1 = ~0;
          error0 = 0;
          error1 = 0;

          if (is_ip4)
            {
              next0 = (iuvn4_0->vxlan.protocol < node->n_next_nodes) ?
                      iuvn4_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
              next1 = (iuvn4_1->vxlan.protocol < node->n_next_nodes) ?
                      iuvn4_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;

              key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
              key4_1.local = iuvn4_1->ip4.dst_address.as_u32;

              key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
              key4_1.remote = iuvn4_1->ip4.src_address.as_u32;

              key4_0.vni = iuvn4_0->vxlan.vni_res;
              key4_1.vni = iuvn4_1->vxlan.vni_res;
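
              /* Keys are built straight from the outer header; vni_res is
                 presumably the VNI word plus the reserved byte as received,
                 so cached keys compare without any byte swapping. */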
              /* Processing for key4_0 */
              if (PREDICT_FALSE((key4_0.as_u64[0] != last_key4.as_u64[0])
                                || (key4_0.as_u64[1] != last_key4.as_u64[1])))
                {
                  p0 = hash_get_mem (ngm->vxlan4_gpe_tunnel_by_key, &key4_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace0;
                    }

                  last_key4.as_u64[0] = key4_0.as_u64[0];
                  last_key4.as_u64[1] = key4_0.as_u64[1];
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
          else /* is_ip6 */
            {
              next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
                      iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
              next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
                      iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
              key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];

              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
              key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];

              key6_0.vni = iuvn6_0->vxlan.vni_res;
              key6_1.vni = iuvn6_1->vxlan.vni_res;

              /* Processing for key6_0 */
              if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
                {
                  p0 = hash_get_mem (ngm->vxlan6_gpe_tunnel_by_key, &key6_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace0;
                    }

                  memcpy (&last_key6, &key6_0, sizeof (key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
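
          /* The v4 and v6 paths converge here with a valid tunnel_index0,
             or have already jumped to trace0 on lookup failure. */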
          t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);

          next0 = t0->protocol;

          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan-gpe tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0: b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
            }

          /* Process packet 1 */
          if (is_ip4)
            {
              /* Processing for key4_1 */
              if (PREDICT_FALSE((key4_1.as_u64[0] != last_key4.as_u64[0])
                                || (key4_1.as_u64[1] != last_key4.as_u64[1])))
                {
                  p1 = hash_get_mem (ngm->vxlan4_gpe_tunnel_by_key, &key4_1);

                  if (p1 == 0)
                    {
                      error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace1;
                    }

                  last_key4.as_u64[0] = key4_1.as_u64[0];
                  last_key4.as_u64[1] = key4_1.as_u64[1];
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
            }
          else /* is_ip6 */
            {
              /* Processing for key6_1 */
              if (PREDICT_FALSE(memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
                {
                  p1 = hash_get_mem (ngm->vxlan6_gpe_tunnel_by_key, &key6_1);

                  if (p1 == 0)
                    {
                      error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace1;
                    }

                  memcpy (&last_key6, &key6_1, sizeof (key6_1));
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
            }

          t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);

          next1 = t1->protocol;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b1);

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same vxlan-gpe tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1: b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0, next1);
        }
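
      /* Single-packet loop: handles frame tails and anything the
         two-at-a-time loop above could not pair. */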
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_vxlan_gpe_header_t * iuvn4_0;
          ip6_vxlan_gpe_header_t * iuvn6_0;
          uword * p0;
          u32 tunnel_index0;
          vxlan_gpe_tunnel_t * t0;
          vxlan4_gpe_tunnel_key_t key4_0;
          vxlan6_gpe_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          if (is_ip4)
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (
                  b0, -(word) (sizeof (udp_header_t) + sizeof (ip4_header_t)));

              iuvn4_0 = vlib_buffer_get_current (b0);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn4_0));
            }
          else
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (
                  b0, -(word) (sizeof (udp_header_t) + sizeof (ip6_header_t)));

              iuvn6_0 = vlib_buffer_get_current (b0);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn6_0));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          if (is_ip4)
            {
              next0 = (iuvn4_0->vxlan.protocol < node->n_next_nodes) ?
                      iuvn4_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;

              key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
              key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
              key4_0.vni = iuvn4_0->vxlan.vni_res;

              /* Processing for key4_0 */
              if (PREDICT_FALSE((key4_0.as_u64[0] != last_key4.as_u64[0])
                                || (key4_0.as_u64[1] != last_key4.as_u64[1])))
                {
                  p0 = hash_get_mem (ngm->vxlan4_gpe_tunnel_by_key, &key4_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace00;
                    }

                  last_key4.as_u64[0] = key4_0.as_u64[0];
                  last_key4.as_u64[1] = key4_0.as_u64[1];
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
          else /* is_ip6 */
            {
              next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
                      iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_0.vni = iuvn6_0->vxlan.vni_res;

              /* Processing for key6_0 */
              if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
                {
                  p0 = hash_get_mem (ngm->vxlan6_gpe_tunnel_by_key, &key6_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace00;
                    }

                  memcpy (&last_key6, &key6_0, sizeof (key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }

          t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);

          next0 = t0->protocol;

          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan-gpe tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00: b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
                               VXLAN_GPE_ERROR_DECAPSULATED, pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (
          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index,
          stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
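
/* Thin wrappers: each registered graph node calls the shared worker with a
   compile-time constant is_ip4, so the inliner can drop the unused path. */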
static uword
vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
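
/* Error counter strings come from vxlan_gpe_error.def, the same list that
   generates the VXLAN_GPE_ERROR_* enum values used above. */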
static char * vxlan_gpe_error_strings[] = {
#define vxlan_gpe_error(n,s) s,
#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
#undef vxlan_gpe_error
};
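
/* Graph node registrations: one input node per address family, sharing the
   error strings, trace formatter, and GPE-protocol-to-next-node map. */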
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .function = vxlan4_gpe_input,
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input)

VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .function = vxlan6_gpe_input,
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input)